from typing import List, Optional, Union

import torch
from transformers import (
    XLMRobertaTokenizer,
)

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    # round each dimension up to the nearest multiple of scale_factor**2,
    # then return the corresponding latent size
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
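
# Quick sanity check of the rounding above (inputs are ours, for illustration):
#   get_new_h_w(768, 768) == (96, 96)    # 768 = 12 * 8**2, no rounding needed
#   get_new_h_w(769, 768) == (104, 96)   # 769 is rounded up to the next multiple of 8**2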
class KandinskyPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky.
    """

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
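

if __name__ == "__main__":
    # Standalone sketch of the classifier-free-guidance arithmetic in the loop above;
    # all shapes, channel counts and the 4.0 scale here are ours, for illustration only.
    latent_channels = 4
    model_out = torch.randn(2, 2 * latent_channels, 96, 96)  # doubled batch: [uncond, text]
    noise_pred, variance_pred = model_out.split(latent_channels, dim=1)
    noise_uncond, noise_text = noise_pred.chunk(2)  # split the doubled batch
    _, variance_text = variance_pred.chunk(2)
    guided = noise_uncond + 4.0 * (noise_text - noise_uncond)
    guided = torch.cat([guided, variance_text], dim=1)  # re-attach variance for the DDPM step
    assert guided.shape == (1, 2 * latent_channels, 96, 96)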
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans one level of the table of content of the documentation by removing duplicate
    entries and sorting the rest alphabetically by title, with "Overview" always first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
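
# Toy illustration of clean_doc_toc (the entries below are made up):
#   clean_doc_toc([
#       {"local": "api/pipelines/overview", "title": "Overview"},
#       {"local": "api/pipelines/kandinsky", "title": "Kandinsky"},
#       {"local": "api/pipelines/ddim", "title": "DDIM"},
#       {"local": "api/pipelines/ddim", "title": "DDIM"},
#   ])
#   -> "Overview" first, then DDIM (deduplicated) and Kandinsky in title order.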
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Count the n-digit positive integers that are also an nth power
    (Project Euler problem 63).
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
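
# A couple of hand-checkable cases behind the counting rule above:
assert len(str(8**1)) == 1   # 8 is a 1-digit first power
assert len(str(9**3)) == 3   # 729 is a 3-digit cube
assert len(str(10**2)) != 2  # 100 has 3 digits, which is why bases stop below 10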
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version

ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """
    Perform a runtime check of a dependency version, using the same syntax as pip.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
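

if __name__ == "__main__":
    # Illustrative usage (the requirement strings below are ours, not from the source):
    require_version("python>=3.0")  # special-cased against sys.version_info; passes on any Python 3
    try:
        require_version("hypothetical_missing_package")  # made-up name, expected to be absent
    except importlib.metadata.PackageNotFoundError as err:
        print(err)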
"""simple docstring"""
a = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def _snake_case ( _snake_case : float ) -> str:
'''simple docstring'''
assert type(_snake_case ) in (int, float) and decimal == int(_snake_case )
_A = int(_snake_case )
_A = ''
_A = False
if decimal < 0:
_A = True
decimal *= -1
while decimal > 0:
_A , _A = divmod(_snake_case , 16 )
_A = values[remainder] + hexadecimal
_A = '0x' + hexadecimal
if negative:
_A = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
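
# Cross-check against Python's built-in hex() (inputs are ours, for illustration):
assert decimal_to_hexadecimal(255) == hex(255) == "0xff"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"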
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    Output of the last layer of the model, of shape `(batch_size, num_channels, sample_size)`.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """
    A 1D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output.
    """

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
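

if __name__ == "__main__":
    # Standalone sketch of the "else" branch of the time embedding in forward()
    # (shapes are ours, chosen for illustration):
    temb = torch.randn(4, 16)  # (batch, embedding_dim) as produced by time_proj
    x = torch.randn(4, 2, 64)  # (batch, channels, length)
    temb = temb[..., None]  # -> (4, 16, 1)
    temb = temb.repeat([1, 1, x.shape[2]]).to(x.dtype)  # -> (4, 16, 64), one copy per position
    assert temb.shape == (4, 16, 64)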
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
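

if __name__ == "__main__":
    # Illustrative only: with the defaults above, the derived channel dimension after
    # the last stage is int(96 * 2 ** (4 - 1)) = 768, exposed as hidden_size.
    cfg = Swinv2Config()
    assert cfg.hidden_size == 768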
import coval  # From: git+https://github.com/ns-moosavi/coval.git  # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets

logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
_SCREAMING_SNAKE_CASE = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
_SCREAMING_SNAKE_CASE = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class __magic_name__ ( lowercase__ ):
_SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : int = ConvBertTokenizer
def __init__( self : Any , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=None , snake_case_ : Any=True , snake_case_ : str="[UNK]" , snake_case_ : Optional[Any]="[SEP]" , snake_case_ : Union[str, Any]="[PAD]" , snake_case_ : Any="[CLS]" , snake_case_ : Union[str, Any]="[MASK]" , snake_case_ : Dict=True , snake_case_ : Union[str, Any]=None , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case_ ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case_ ) != tokenize_chinese_chars
):
__snake_case = getattr(snake_case_ , normalizer_state.pop("type" ) )
__snake_case = do_lower_case
__snake_case = strip_accents
__snake_case = tokenize_chinese_chars
__snake_case = normalizer_class(**snake_case_ )
__snake_case = do_lower_case
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int]=None ):
__snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
__snake_case = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
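
# Illustrative: the BERT-style special-token layout the two helpers above produce
# (token ids 10/11/20/21 are made up; loading the real vocab requires a download):
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#     -> [CLS] 10 11 [SEP] 20 21 [SEP]
#   tok.create_token_type_ids_from_sequences([10, 11], [20, 21])
#     -> [0, 0, 0, 0, 1, 1, 1]   # zeros for the first segment, ones for the second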
"""simple docstring"""
from __future__ import annotations
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
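
# Illustrative call: tau = F / A, so with F = 25 N over A = 100 m^2:
#   shear_stress(stress=0, tangential_force=25, area=100) -> ('stress', 0.25)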
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True if `ip_v4_address` is a valid dot-decimal IPv4 address:
    four integer octets, each in the range 0-255.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Given any three of force, charge1, charge2 and distance, compute the fourth
    from Coulomb's law. Exactly one of the four arguments must be 0.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
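
# Illustrative call: two 1 C charges held 1 m apart feel F = k * |q1 * q2| / r**2:
#   coulombs_law(force=0, charge1=1, charge2=1, distance=1) -> {'force': 8988000000.0}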
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer

try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
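
# Example invocation (the script filename and all paths are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
# The converted checkpoint can then be loaded with
#   LlamaForCausalLM.from_pretrained("/output/path")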
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "trajectory_transformer"
snake_case_ = ["past_key_values"]
snake_case_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
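
# Minimal usage sketch: `attribute_map` lets the standard config names resolve
# to the GPT-style fields above.
#
#   config = TrajectoryTransformerConfig()
#   config.hidden_size, config.num_hidden_layers  # -> 128, 4 (via n_embd / n_layer)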
| 369 | 1 |
import numpy as np
class Cell:
    """A cell on the world grid; tracks position, parent and A* costs."""

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def showcell(self) -> None:
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neighbours(self, cell):
        """Return the valid neighbouring cells of `cell`."""
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            if n in _closed:
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared Euclidean heuristic
            n.f = n.h + n.g
            if n not in _open:
                _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
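    # Usage sketch with a custom grid size (values are arbitrary):
    #
    #   big_world = Gridworld(world_size=(10, 10))
    #   a, b = Cell(), Cell()
    #   a.position, b.position = (0, 0), (9, 9)
    #   print(astar(big_world, a, b))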
| 322 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 322 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
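
# Usage sketch (label names are hypothetical): aligning the template with a
# dataset's features fills in the concrete ClassLabel for "labels".
#
#   feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification().align_with_features(feats)
#   template.label_schema["labels"].names  # -> ["neg", "pos"]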
| 474 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast REALM tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of text candidates, always padding to max_length."""
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
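
# Usage sketch for batch_encode_candidates (checkpoint name is illustrative):
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch_text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   encoded = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
#   encoded["input_ids"].shape  # expected: (num_candidates, num_candidate_texts, max_length)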
| 42 | 0 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
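
# Quick check (sketch; requires `import torch`):
#   act = get_activation("gelu")
#   act(torch.randn(2, 4))  # applies GELU elementwise
#   get_activation("silu") and get_activation("swish") both return nn.SiLU().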
| 701 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the per-step random draws."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model y = a * x + b to do basic checkpointing tests with."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)

            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states: with total_limit=2, only the last two should remain.
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """/tmp/accelerate/state_checkpointing"""
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
SCREAMING_SNAKE_CASE__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE__ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
SCREAMING_SNAKE_CASE__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE__ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE__ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 601 | 0 |
"""Convert between common units of energy, using joules as the base unit."""

ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` from `from_type` to `to_type` using the table above."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
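
# Spot checks (values follow directly from the conversion table):
#   energy_conversion("joule", "kilojoule", 1)      -> 0.001
#   energy_conversion("kilowatthour", "joule", 1)   -> 3600000.0
#   energy_conversion("wattsecond", "watthour", 1)  -> approx 0.0002778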
| 301 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""Wraps a ViLT image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images, text=None, add_special_tokens: bool = True, padding=False, truncation=None, max_length=None, stride: int = 0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
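
# Usage sketch (checkpoint name illustrative; `image` is a PIL image):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")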
| 301 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
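    # Example invocation (paths are illustrative):
    #
    #   python binarized_data.py --file_path data/dump.txt \
    #       --tokenizer_type bert --tokenizer_name bert-base-uncased \
    #       --dump_file data/binarized_text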
| 706 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_lowercase : Any ="Usage of script: script_name <size_of_canvas:int>"
_lowercase : Optional[Any] =[0] * 100 + [1] * 10
random.shuffle(choice)
def __UpperCAmelCase ( UpperCamelCase__ :int ) -> list[list[bool]]:
snake_case__ : List[str] = [[False for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
return canvas
def __UpperCAmelCase ( UpperCamelCase__ :list[list[bool]] ) -> None:
for i, row in enumerate(UpperCamelCase__ ):
for j, _ in enumerate(UpperCamelCase__ ):
snake_case__ : Optional[Any] = bool(random.getrandbits(1 ) )
def __UpperCAmelCase ( UpperCamelCase__ :list[list[bool]] ) -> list[list[bool]]:
snake_case__ : Any = np.array(UpperCamelCase__ )
snake_case__ : Union[str, Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(UpperCamelCase__ ):
for c, pt in enumerate(UpperCamelCase__ ):
snake_case__ : List[str] = __judge_point(
UpperCamelCase__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
snake_case__ : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
snake_case__ : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __UpperCAmelCase ( UpperCamelCase__ :bool , UpperCamelCase__ :list[list[bool]] ) -> bool:
snake_case__ : int = 0
snake_case__ : List[str] = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
snake_case__ : int = pt
if pt:
if alive < 2:
snake_case__ : int = False
elif alive == 2 or alive == 3:
snake_case__ : Dict = True
elif alive > 3:
snake_case__ : Optional[Any] = False
else:
if alive == 3:
snake_case__ : int = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_lowercase : int =int(sys.argv[1])
# main working structure of this module.
_lowercase : Union[str, Any] =create_canvas(canvas_size)
seed(c)
_lowercase , _lowercase : Tuple =plt.subplots()
fig.show()
_lowercase : List[Any] =ListedColormap(["w", "k"])
try:
while True:
_lowercase : Optional[int] =run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
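# Example invocation from a shell (canvas size is arbitrary):
#
#   python game_of_life.py 25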
| 574 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder; returns the measurement counts."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 3 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
UpperCAmelCase_ : int = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase_ : Optional[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase_ : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase_ : str = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase_ : Any = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : Any = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 367 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : List[Any] ,lowercase__ : int = 1_2_8 ,lowercase__ : int = 2_5_6 ,lowercase__ : float = 2_0_0_0.0 ,lowercase__ : int = 7_6_8 ,lowercase__ : int = 1_2 ,lowercase__ : int = 1_2 ,lowercase__ : int = 6_4 ,lowercase__ : int = 2_0_4_8 ,lowercase__ : float = 0.1 ,):
super().__init__()
__lowercase = nn.Sequential(
nn.Linear(lowercase__ ,d_model * 4 ,bias=lowercase__ ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=lowercase__ ) ,nn.SiLU() ,)
__lowercase = nn.Embedding(lowercase__ ,lowercase__ )
__lowercase = False
__lowercase = nn.Linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
__lowercase = nn.Dropout(p=lowercase__ )
__lowercase = nn.ModuleList()
for lyr_num in range(lowercase__ ):
# FiLM conditional T5 decoder
__lowercase = DecoderLayer(d_model=lowercase__ ,d_kv=lowercase__ ,num_heads=lowercase__ ,d_ff=lowercase__ ,dropout_rate=lowercase__ )
self.decoders.append(lowercase__ )
__lowercase = TaLayerNorm(lowercase__ )
__lowercase = nn.Dropout(p=lowercase__ )
__lowercase = nn.Linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ):
__lowercase = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ):
__lowercase , __lowercase , __lowercase = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowercase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
__lowercase = self.conditioning_emb(lowercase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowercase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowercase = torch.broadcast_to(
torch.arange(lowercase__ ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
__lowercase = self.position_encoding(lowercase__ )
__lowercase = self.continuous_inputs_projection(lowercase__ )
inputs += position_encodings
__lowercase = self.dropout(lowercase__ )
# decoder: No padding present.
__lowercase = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowercase = [(x, self.encoder_decoder_mask(lowercase__ ,lowercase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowercase = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
__lowercase = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
__lowercase = lyr(
lowercase__ ,conditioning_emb=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)[0]
__lowercase = self.decoder_norm(lowercase__ )
__lowercase = self.post_dropout(lowercase__ )
__lowercase = self.spec_out(lowercase__ )
return spec_out
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : str ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : int ,lowercase__ : List[Any]=1e-6 ):
super().__init__()
__lowercase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowercase__ ,d_kv=lowercase__ ,num_heads=lowercase__ ,dropout_rate=lowercase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowercase__ ,d_kv=lowercase__ ,num_heads=lowercase__ ,dropout_rate=lowercase__ ,layer_norm_epsilon=lowercase__ ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowercase__ ,d_ff=lowercase__ ,dropout_rate=lowercase__ ,layer_norm_epsilon=lowercase__ ) )
# (tail of the film-conditioned T5 DecoderLayer: the class header and the imports —
# math, torch, torch.nn as nn, and diffusers' Attention — live in the previous chunk)
    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated-GELU: one projection is passed through GELU, the other stays linear,
        # and the two are multiplied elementwise
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467, thus the variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of GELU, as used by Google in the original BERT repo
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """FiLM (feature-wise linear modulation): predict a scale and shift from the conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
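# Illustrative sanity check of the FiLM conditioning and RMS-norm pieces above.
# A minimal sketch with hypothetical sizes, not part of the original module.
if __name__ == "__main__":
    d_model = 64
    film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
    norm = T5LayerNorm(d_model)
    x = torch.randn(2, 10, d_model)        # (batch, seq_len, d_model)
    cond = torch.randn(2, 1, d_model * 4)  # conditioning embedding, broadcast over seq_len
    out = film(norm(x), cond)              # scale/shift the RMS-normalized activations
    print(out.shape)                       # torch.Size([2, 10, 64])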
| 41 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
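# A minimal sketch of the idea these tests exercise (illustrative, not the real
# datasets implementation): temporarily rebind an attribute reachable through a
# module, then restore it on exit.
from contextlib import contextmanager


@contextmanager
def tiny_patch(module, attr, new_value):
    old_value = getattr(module, attr)
    setattr(module, attr, new_value)
    try:
        yield
    finally:
        setattr(module, attr, old_value)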
| 41 | 1 |
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    # XOR each cipher byte with the repeating key; bail out as soon as a
    # decoded character falls outside the printable set
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
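# Quick illustration of the XOR property the solver relies on: applying the same
# repeating key twice returns the plaintext (toy key and message, illustrative only).
if __name__ == "__main__":
    message = "hello"
    key = (ord("a"), ord("b"), ord("c"))
    encrypted = [c ^ k for c, k in zip(map(ord, message), cycle(key))]
    assert try_key(encrypted, key) == "hello"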
| 703 |
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
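    # Plain-numpy sanity check of the same identities (union is the elementwise
    # max, intersection the elementwise min); independent of skfuzzy.
    a = np.array([0.0, 0.3, 0.7, 1.0])
    b = np.array([0.2, 0.2, 0.9, 0.5])
    assert np.allclose(np.maximum(a, b), [0.2, 0.3, 0.9, 1.0])
    assert np.allclose(np.minimum(a, b), [0.0, 0.2, 0.7, 0.5])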
| 186 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = []
create_all_state(1 , UpperCamelCase_ , UpperCamelCase_ , [] , UpperCamelCase_ )
return result
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(UpperCamelCase_ , total_number - level + 2 ):
current_list.append(UpperCamelCase_ )
create_all_state(i + 1 , UpperCamelCase_ , level - 1 , UpperCamelCase_ , UpperCamelCase_ )
current_list.pop()
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
for i in total_list:
print(*UpperCamelCase_ )
if __name__ == "__main__":
lowerCAmelCase_ : Optional[Any] = 4
lowerCAmelCase_ : Optional[int] = 2
lowerCAmelCase_ : Optional[int] = generate_all_combinations(n, k)
print_all_state(total_list)
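# Expected output for n = 4, k = 2 (each line is one combination):
# 1 2
# 1 3
# 1 4
# 2 3
# 2 4
# 3 4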
| 673 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def _lowerCAmelCase ( UpperCamelCase_ ):
if hor == 128:
__SCREAMING_SNAKE_CASE = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
__SCREAMING_SNAKE_CASE = (32, 128, 256)
__SCREAMING_SNAKE_CASE = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
__SCREAMING_SNAKE_CASE = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
__SCREAMING_SNAKE_CASE = (32, 64, 128, 256)
__SCREAMING_SNAKE_CASE = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
__SCREAMING_SNAKE_CASE = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
__SCREAMING_SNAKE_CASE = model.state_dict()
__SCREAMING_SNAKE_CASE = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
__SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCamelCase_ )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
__SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ )
hf_value_function.load_state_dict(UpperCamelCase_ )
torch.save(hf_value_function.state_dict() , f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json" , """w""" ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
__SCREAMING_SNAKE_CASE = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
__SCREAMING_SNAKE_CASE = model
__SCREAMING_SNAKE_CASE = UNetaDModel(**UpperCamelCase_ )
print(f"length of state dict: {len(state_dict.keys() )}" )
print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
__SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ )
hf_value_function.load_state_dict(UpperCamelCase_ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
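# The key-remapping trick used above in isolation, with hypothetical toy keys:
# it only works because both dicts enumerate parameters in the same order.
old = {"blocks.0.w": 1, "blocks.1.w": 2}
new_keys = ["down.0.weight", "down.1.weight"]
remapped = {nk: old[ok] for ok, nk in zip(list(old.keys()), new_keys)}
assert remapped == {"down.0.weight": 1, "down.1.weight": 2}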
| 155 | 0 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
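# Minimal usage sketch of the processor under test (assumes transformers and
# Pillow are installed; the 18x18 size follows the tester defaults above).
if __name__ == "__main__":
    from PIL import Image
    import numpy as np
    from transformers import ViTImageProcessor

    processor = ViTImageProcessor(size={"height": 18, "width": 18})
    image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
    pixel_values = processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])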
| 219 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
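# Example of the command this builds (flag values here are hypothetical):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central2-b \
#       --command "python train.py" --debug
# would print roughly:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b \
#       --command "cd /usr/share; python train.py" --worker all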
| 219 | 1 |
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 481 |
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
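# Sketch of what the batched call exercises: ragged raw-audio inputs each become
# a fixed-size mel spectrogram of shape (1024, 128), per the integration test above.
# Illustrative only; lengths are hypothetical.
# fe = ASTFeatureExtractor()
# batch = fe([[0.1] * 800, [0.1] * 1200], sampling_rate=16000, return_tensors="np")
# print(batch.input_values.shape)  # (2, 1024, 128)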
| 198 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the built-in voltage of a p-n junction."""
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
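# Worked example (hypothetical silicon-like numbers): N_D = N_A = 1e17 cm^-3 and
# n_i = 1.5e10 cm^-3 give V_bi = (kT/q) * ln(N_D * N_A / n_i^2),
# roughly 0.0259 V * 31.4 ~ 0.81 V at 300 K.
# print(builtin_voltage(1e17, 1e17, 1.5e10))  # ~0.81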
| 700 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
a__ : List[str] = 6_37_81_37.0
a__ : Tuple = 6_35_67_52.31_42_45
a__ : str = 6_3_7_8_1_3_7
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (AXIS_A - AXIS_B) / AXIS_A
__SCREAMING_SNAKE_CASE = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
__SCREAMING_SNAKE_CASE = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) )
__SCREAMING_SNAKE_CASE = radians(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = radians(lowerCAmelCase_ )
# Equation
__SCREAMING_SNAKE_CASE = sin((phi_a - phi_a) / 2 )
__SCREAMING_SNAKE_CASE = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__SCREAMING_SNAKE_CASE = sqrt(sin_sq_phi + (cos(lowerCAmelCase_ ) * cos(lowerCAmelCase_ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
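# Usage sketch (coordinates are illustrative): distance between San Francisco
# (37.7749, -122.4194) and New York (40.7128, -74.0060), roughly 4.1e6 metres.
# print(haversine_distance(37.7749, -122.4194, 40.7128, -74.0060))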
| 553 | 0 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: Optional[int] , SCREAMING_SNAKE_CASE: Optional[int] ):
"""simple docstring"""
while a != 0:
_lowerCAmelCase = b % a, a
return b
def __snake_case ( SCREAMING_SNAKE_CASE: Dict , SCREAMING_SNAKE_CASE: Any ):
"""simple docstring"""
if gcd(lowercase__ , lowercase__ ) != 1:
_lowerCAmelCase = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(lowercase__ )
_lowerCAmelCase = 1, 0, a
_lowerCAmelCase = 0, 1, m
while va != 0:
_lowerCAmelCase = ua // va
_lowerCAmelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
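# Quick check: 3 * 7 = 21 = 1 (mod 10), so the inverse of 3 modulo 10 is 7.
if __name__ == "__main__":
    assert mod_inverse(3, 10) == 7
    assert (3 * mod_inverse(3, 10)) % 10 == 1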
| 580 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_UpperCamelCase = get_tests_dir("""fixtures""")
_UpperCamelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_UpperCamelCase = get_tests_dir("""fixtures/dummy-config.json""")
class __a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = 0
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : int = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowerCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(snake_case ).to_dict()
config_dict.pop("feature_extractor_type" )
lowerCAmelCase__ : Any = WavaVecaFeatureExtractor(**snake_case )
# save in new folder
model_config.save_pretrained(snake_case )
config.save_pretrained(snake_case )
lowerCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(snake_case )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCAmelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained("bert-base" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(snake_case , revision="aaaaaa" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
lowerCAmelCase__ : Dict = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with self.assertRaises(snake_case ):
lowerCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case ):
lowerCAmelCase__ : str = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case )
lowerCAmelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(snake_case )
lowerCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(snake_case , trust_remote_code=snake_case )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , snake_case )
AutoFeatureExtractor.register(snake_case , snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case ):
AutoFeatureExtractor.register(snake_case , snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase__ : List[Any] = CustomFeatureExtractor.from_pretrained(snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(snake_case )
lowerCAmelCase__ : Any = AutoFeatureExtractor.from_pretrained(snake_case )
self.assertIsInstance(snake_case , snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
class __a ( __magic_name__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = True
try:
AutoConfig.register("custom" , snake_case )
AutoFeatureExtractor.register(snake_case , snake_case )
# If remote code is not set, the default is to use local
lowerCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ : Dict = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(snake_case , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 453 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Tuple , __A: int , __A: Any=13 , __A: Optional[int]=7 , __A: List[str]=True , __A: str=True , __A: List[Any]=True , __A: Union[str, Any]=True , __A: Optional[int]=99 , __A: List[str]=[1, 1, 2] , __A: str=1 , __A: List[str]=32 , __A: List[str]=4 , __A: Dict=8 , __A: str=37 , __A: Union[str, Any]="gelu_new" , __A: Tuple=0.1 , __A: str=0.1 , __A: Any=0.0 , __A: Union[str, Any]=512 , __A: List[str]=3 , __A: Tuple=0.0_2 , __A: int=3 , __A: int=4 , __A: Dict=None , __A: Tuple=False , ):
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = block_sizes
a__ = num_decoder_layers
a__ = d_model
a__ = n_head
a__ = d_head
a__ = d_inner
a__ = hidden_act
a__ = hidden_dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = 2
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = initializer_std
# Used in the tests to check the size of the first attention layer
a__ = n_head
# Used in the tests to check the size of the first hidden state
a__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
a__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
a__ = self.num_hidden_layers + 2
def lowercase ( self: List[str] ):
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ = ids_tensor([self.batch_size] , self.num_choices )
a__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase ( self: List[Any] , __A: Tuple , __A: Dict , __A: Union[str, Any] , __A: str , __A: Tuple , __A: Union[str, Any] , __A: str , ):
'''simple docstring'''
a__ = TFFunnelModel(config=__A )
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ = model(__A )
a__ = [input_ids, input_mask]
a__ = model(__A )
a__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
a__ = False
a__ = TFFunnelModel(config=__A )
a__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
a__ = False
a__ = TFFunnelModel(config=__A )
a__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowercase ( self: str , __A: Dict , __A: List[str] , __A: List[str] , __A: int , __A: str , __A: int , __A: Any , ):
'''simple docstring'''
a__ = TFFunnelBaseModel(config=__A )
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ = model(__A )
a__ = [input_ids, input_mask]
a__ = model(__A )
a__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
a__ = False
a__ = TFFunnelBaseModel(config=__A )
a__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
a__ = False
a__ = TFFunnelBaseModel(config=__A )
a__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowercase ( self: Any , __A: Any , __A: Tuple , __A: int , __A: Optional[int] , __A: int , __A: List[Any] , __A: List[str] , ):
'''simple docstring'''
a__ = TFFunnelForPreTraining(config=__A )
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self: Tuple , __A: List[Any] , __A: Union[str, Any] , __A: int , __A: str , __A: Tuple , __A: Optional[Any] , __A: int , ):
'''simple docstring'''
a__ = TFFunnelForMaskedLM(config=__A )
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self: str , __A: Union[str, Any] , __A: Optional[int] , __A: Tuple , __A: int , __A: Union[str, Any] , __A: str , __A: Optional[Any] , ):
'''simple docstring'''
a__ = self.num_labels
a__ = TFFunnelForSequenceClassification(config=__A )
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self: List[Any] , __A: Union[str, Any] , __A: Any , __A: Dict , __A: Tuple , __A: int , __A: Optional[Any] , __A: int , ):
'''simple docstring'''
a__ = self.num_choices
a__ = TFFunnelForMultipleChoice(config=__A )
a__ = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
a__ = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
a__ = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
a__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
a__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self: List[Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Tuple , __A: int , __A: int , __A: List[str] , __A: int , ):
'''simple docstring'''
a__ = self.num_labels
a__ = TFFunnelForTokenClassification(config=__A )
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self: Any , __A: Union[str, Any] , __A: Dict , __A: Tuple , __A: List[str] , __A: int , __A: Optional[Any] , __A: Optional[int] , ):
'''simple docstring'''
a__ = TFFunnelForQuestionAnswering(config=__A )
a__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a__ = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self: Union[str, Any] ):
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
(
(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,(
a__
) ,
) = config_and_inputs
a__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE =(
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
def lowercase ( self: str ):
'''simple docstring'''
a__ = TFFunnelModelTester(self )
a__ = ConfigTester(self , config_class=__A )
def lowercase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase ( self: Dict ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowercase ( self: str ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def lowercase ( self: int ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowercase ( self: int ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def lowercase ( self: Tuple ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@require_tf
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
def lowercase ( self: List[Any] ):
'''simple docstring'''
a__ = TFFunnelModelTester(self , base=__A )
a__ = ConfigTester(self , config_class=__A )
def lowercase ( self: int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase ( self: Any ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__A )
def lowercase ( self: Optional[int] ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowercase ( self: Optional[Any] ):
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
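# The multiple-choice check above repeats each example once per answer choice;
# the same reshape in isolation (illustrative shapes only):
# import tensorflow as tf
# ids = tf.constant([[1, 2, 3]])                       # (batch=1, seq=3)
# tiled = tf.tile(tf.expand_dims(ids, 1), (1, 4, 1))   # (1, num_choices=4, 3)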
| 200 |
"""simple docstring"""
__a : List[Any] = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__a : Union[str, Any] = frozenset(['prompt', 'negative_prompt'])
__a : Any = frozenset([])
__a : Union[str, Any] = frozenset(['image'])
__a : Dict = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
__a : Dict = frozenset(['image'])
__a : Dict = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__a : Optional[Any] = frozenset(['prompt', 'image', 'negative_prompt'])
__a : List[Any] = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
__a : Union[str, Any] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
__a : Optional[Any] = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__a : int = frozenset(['image', 'mask_image'])
__a : Tuple = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
__a : Optional[Any] = frozenset(['example_image', 'image', 'mask_image'])
__a : Optional[Any] = frozenset(['class_labels'])
__a : Tuple = frozenset(['class_labels'])
__a : int = frozenset(['batch_size'])
__a : int = frozenset([])
__a : Union[str, Any] = frozenset(['batch_size'])
__a : Tuple = frozenset([])
__a : Dict = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
__a : Dict = frozenset(['prompt', 'negative_prompt'])
__a : Optional[int] = frozenset(['input_tokens'])
__a : str = frozenset(['input_tokens'])
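# These frozensets are combined with ordinary set algebra in the pipeline tests,
# e.g. the IF tests below use:
#   params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
# which leaves {"prompt", "guidance_scale", "negative_prompt", "prompt_embeds",
# "negative_prompt_embeds", "cross_attention_kwargs"}.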
| 200 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
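# For reference, a simplified sketch of what the imported
# assert_mean_pixel_difference checks (illustrative, not the exact diffusers
# implementation): the average absolute pixel error between the generated and
# the reference image must stay below a small threshold.
def _mean_pixel_difference_sketch(image, expected_image) -> float:
    import numpy as np

    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    return float(np.abs(image - expected_image).mean())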
| 525 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into per-attribute column lists."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
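if __name__ == "__main__":
    # Example run with made-up data: three candidates scored on price (lower is
    # better, weight 0), mileage (lower is better, weight 0) and year (higher
    # is better, weight 1); the combined score is appended to each row.
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))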
| 525 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
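# Illustrative helper (not in the original script) making the corner-to-YOLO
# conversion performed inside main() explicit: [label, xmin, ymin, xmax, ymax]
# becomes [label, x_center, y_center, width, height].
def corners_to_yolo(bbox: list) -> list:
    label, xmin, ymin, xmax, ymax = bbox
    width = xmax - xmin
    height = ymax - ymin
    return [label, xmin + width / 2, ymin + height / 2, width, height]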
if __name__ == "__main__":
main()
    print('DONE ✅')
| 626 | 0 |
"""simple docstring"""
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
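def brute_force_min_vertex_cover(graph: dict) -> set:
    """Exponential-time reference implementation, added for illustration: try
    subsets in increasing size and return the first one covering every edge.
    The greedy result above is an approximation, so it can only be larger.

    >>> g = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> len(brute_force_min_vertex_cover(g)) <= len(greedy_min_vertex_cover(g))
    True
    """
    from itertools import combinations

    edges = {tuple(sorted((u, v))) for u in graph for v in graph[u]}
    for size in range(len(graph) + 1):
        for subset in combinations(graph, size):
            chosen = set(subset)
            if all(u in chosen or v in chosen for u, v in edges):
                return chosen
    return set(graph)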
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 128 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
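# Usage sketch (illustrative): in practice this formatter is selected through
# `Dataset.with_format("torch")` rather than instantiated directly.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
    print(type(ds[0]["x"]), ds[0]["x"].dtype)  # torch.Tensor, torch.float32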
| 116 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
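if __name__ == "__main__":
    # Usage sketch (downloads the checkpoint from the Hub): token_type_ids mark
    # the two segments built by create_token_type_ids_from_sequences above.
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    encoded = tokenizer("who is there?", "a visual question answering model")
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])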
| 116 | 1 |
def apply_table(inp: str, table: list[int]) -> str:
    """Reorder the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Circular left shift by one bit."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
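    # Round-trip sanity check (added for illustration): applying the subkeys in
    # reverse order during decryption must recover the original message.
    assert PT == message, "S-DES round trip failed"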
| 710 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 626 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( a_ , a_):
snake_case_ = old_name
if "patch_embed" in old_name:
        _, layer, param = old_name.split('.')
if layer == "0":
snake_case_ = old_name.replace('0' , 'convolution1')
elif layer == "1":
snake_case_ = old_name.replace('1' , 'batchnorm_before')
elif layer == "3":
snake_case_ = old_name.replace('3' , 'convolution2')
else:
snake_case_ = old_name.replace('4' , 'batchnorm_after')
if "network" in old_name and re.search(R'\d\.\d' , a_):
snake_case_ = R'\b\d{2}\b'
if bool(re.search(a_ , a_)):
snake_case_ = re.search(R'\d\.\d\d.' , a_).group()
else:
snake_case_ = re.search(R'\d\.\d.' , a_).group()
if int(match[0]) < 6:
snake_case_ = old_name.replace(a_ , '')
snake_case_ = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1])
snake_case_ = 'intermediate_stages.' + trimmed_name
else:
snake_case_ = old_name.replace(a_ , '')
if int(match[2]) < num_meta4D_last_stage:
snake_case_ = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2])
else:
snake_case_ = str(int(match[2]) - num_meta4D_last_stage)
snake_case_ = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index)
if "norm1" in old_name:
snake_case_ = trimmed_name.replace('norm1' , 'layernorm1')
elif "norm2" in old_name:
snake_case_ = trimmed_name.replace('norm2' , 'layernorm2')
elif "fc1" in old_name:
snake_case_ = trimmed_name.replace('fc1' , 'linear_in')
elif "fc2" in old_name:
snake_case_ = trimmed_name.replace('fc2' , 'linear_out')
snake_case_ = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(R'.\d.' , a_):
snake_case_ = old_name.replace('network' , 'intermediate_stages')
if "fc" in new_name:
snake_case_ = new_name.replace('fc' , 'convolution')
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
snake_case_ = new_name.replace('norm1' , 'batchnorm_before')
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
snake_case_ = new_name.replace('norm2' , 'batchnorm_after')
if "proj" in new_name:
snake_case_ = new_name.replace('proj' , 'projection')
if "dist_head" in new_name:
snake_case_ = new_name.replace('dist_head' , 'distillation_classifier')
elif "head" in new_name:
snake_case_ = new_name.replace('head' , 'classifier')
elif "patch_embed" in new_name:
snake_case_ = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
snake_case_ = new_name.replace('norm' , 'layernorm')
snake_case_ = 'efficientformer.' + new_name
else:
snake_case_ = 'efficientformer.encoder.' + new_name
return new_name
def __UpperCAmelCase ( a_ , a_):
for key in checkpoint.copy().keys():
snake_case_ = checkpoint.pop(a_)
snake_case_ = val
return checkpoint
def __UpperCAmelCase ( ):
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(a_ , stream=a_).raw)
return image
def __UpperCAmelCase ( a_ , a_ , a_ , a_):
snake_case_ = torch.load(a_ , map_location='cpu')['model']
snake_case_ = EfficientFormerConfig.from_json_file(a_)
snake_case_ = EfficientFormerForImageClassificationWithTeacher(a_)
snake_case_ = '_'.join(checkpoint_path.split('/')[-1].split('.')[0].split('_')[:-1])
snake_case_ = config.depths[-1] - config.num_metaad_blocks + 1
snake_case_ = convert_torch_checkpoint(a_ , a_)
model.load_state_dict(a_)
model.eval()
snake_case_ = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
snake_case_ = prepare_img()
snake_case_ = 2_56
snake_case_ = 2_24
snake_case_ = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
snake_case_ = processor(images=a_ , return_tensors='pt').pixel_values
# original processing pipeline
snake_case_ = Compose(
[
Resize(a_ , interpolation=pillow_resamplings['bicubic']),
CenterCrop(a_),
ToTensor(),
Normalize(a_ , a_),
])
snake_case_ = image_transforms(a_).unsqueeze(0)
assert torch.allclose(a_ , a_)
snake_case_ = model(a_)
snake_case_ = outputs.logits
snake_case_ = (1, 10_00)
if "l1" in model_name:
snake_case_ = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28])
assert torch.allclose(logits[0, :10] , a_ , atol=1E-3)
assert logits.shape == expected_shape
elif "l3" in model_name:
snake_case_ = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27])
assert torch.allclose(logits[0, :10] , a_ , atol=1E-3)
assert logits.shape == expected_shape
elif "l7" in model_name:
snake_case_ = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78])
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''')
# Save Checkpoints
Path(a_).mkdir(exist_ok=a_)
model.save_pretrained(a_)
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''')
processor.save_pretrained(a_)
    print(f'''Processor successfully saved at {pytorch_dump_path}''')
if push_to_hub:
print('Pushing model to the hub...')
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add model' , use_temp_dir=a_ , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add image processor' , use_temp_dir=a_ , )
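def _regex_rename_demo() -> None:
    # Self-contained illustration (hypothetical keys) of the pattern matching
    # used above: re.search(r"\d\.\d", key) picks out the first
    # "<stage>.<block>" digit pair that decides where a weight is routed.
    for key in ("network.1.3.mlp.fc1.weight", "network.7.0.norm1.weight"):
        match = re.search(r"\d\.\d", key)
        print(key, "->", match.group() if match else None)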
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
lowercase = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 198 |
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])

    parser = ArgumentParser()
    parser.add_argument('--streaming', type=bool)
    parser.add_argument('--local_rank', type=int)
    parser.add_argument('--num_workers', type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {'shards': [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
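def _expected_local_sizes(full_size: int, world_size: int) -> List[int]:
    # Worked illustration of the bookkeeping in main(): with 4 shards x 3 items
    # (12 total) and world_size 5, this returns [3, 3, 2, 2, 2].
    return [full_size // world_size + int(rank < full_size % world_size) for rank in range(world_size)]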
if __name__ == "__main__":
main()
| 198 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 3 , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = "relu" , )-> Any:
super().__init__()
UpperCAmelCase__ : Union[str, Any] = nn.Convad(
__UpperCamelCase , __UpperCamelCase , kernel_size=__UpperCamelCase , stride=__UpperCamelCase , padding=kernel_size // 2 , groups=__UpperCamelCase , bias=__UpperCamelCase , )
UpperCAmelCase__ : Optional[Any] = nn.BatchNormad(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
UpperCAmelCase__ : List[str] = self.convolution(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = self.normalization(__UpperCamelCase )
UpperCAmelCase__ : Any = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> str:
super().__init__()
UpperCAmelCase__ : Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
UpperCAmelCase__ : Optional[int] = config.num_channels
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
UpperCAmelCase__ : Dict = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
UpperCAmelCase__ : Optional[Any] = self.embedder(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 2 )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : Optional[Any] = nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , stride=__UpperCamelCase , bias=__UpperCamelCase )
UpperCAmelCase__ : int = nn.BatchNormad(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tensor:
UpperCAmelCase__ : List[str] = self.convolution(__UpperCamelCase )
UpperCAmelCase__ : str = self.normalization(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
super().__init__()
UpperCAmelCase__ : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) )
UpperCAmelCase__ : Union[str, Any] = nn.Sequential(
nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(__UpperCamelCase , __UpperCamelCase , kernel_size=1 ) , nn.Sigmoid() , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
# b c h w -> b c 1 1
UpperCAmelCase__ : Union[str, Any] = self.pooler(__UpperCamelCase )
UpperCAmelCase__ : Any = self.attention(__UpperCamelCase )
UpperCAmelCase__ : List[str] = hidden_state * attention
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 )-> List[str]:
super().__init__()
UpperCAmelCase__ : Tuple = in_channels != out_channels or stride != 1
UpperCAmelCase__ : List[str] = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : str = (
RegNetShortCut(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ : List[Any] = nn.Sequential(
RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase ) , )
UpperCAmelCase__ : str = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Optional[Any] = hidden_state
UpperCAmelCase__ : Dict = self.layer(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = self.shortcut(__UpperCamelCase )
hidden_state += residual
UpperCAmelCase__ : Tuple = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 )-> str:
super().__init__()
UpperCAmelCase__ : Any = in_channels != out_channels or stride != 1
UpperCAmelCase__ : str = max(1 , out_channels // config.groups_width )
UpperCAmelCase__ : Union[str, Any] = (
RegNetShortCut(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ : List[str] = nn.Sequential(
RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , groups=__UpperCamelCase , activation=config.hidden_act ) , RegNetSELayer(__UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__UpperCamelCase , __UpperCamelCase , kernel_size=1 , activation=__UpperCamelCase ) , )
UpperCAmelCase__ : List[str] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
UpperCAmelCase__ : Any = hidden_state
UpperCAmelCase__ : Optional[Any] = self.layer(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.shortcut(__UpperCamelCase )
hidden_state += residual
UpperCAmelCase__ : str = self.activation(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 2 , __UpperCamelCase = 2 , )-> str:
super().__init__()
UpperCAmelCase__ : Optional[int] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
UpperCAmelCase__ : Any = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , stride=__UpperCamelCase , ) , *[layer(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for _ in range(depth - 1 )] , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = self.layers(__UpperCamelCase )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Tuple:
super().__init__()
UpperCAmelCase__ : int = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
__UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCAmelCase__ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__UpperCamelCase , config.depths[1:] ):
self.stages.append(RegNetStage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , depth=__UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = True )-> BaseModelOutputWithNoAttention:
UpperCAmelCase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase__ : Tuple = hidden_states + (hidden_state,)
UpperCAmelCase__ : Optional[Any] = stage_module(__UpperCamelCase )
if output_hidden_states:
UpperCAmelCase__ : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCamelCase , hidden_states=__UpperCamelCase )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = RegNetConfig
_A = 'regnet'
_A = 'pixel_values'
_A = True
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
if isinstance(__UpperCamelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(__UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False )-> str:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : str = value
REGNET_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Tuple:
super().__init__(__UpperCamelCase )
UpperCAmelCase__ : List[str] = config
UpperCAmelCase__ : Dict = RegNetEmbeddings(__UpperCamelCase )
UpperCAmelCase__ : List[str] = RegNetEncoder(__UpperCamelCase )
UpperCAmelCase__ : List[str] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None )-> BaseModelOutputWithPoolingAndNoAttention:
UpperCAmelCase__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Optional[Any] = self.embedder(__UpperCamelCase )
UpperCAmelCase__ : Any = self.encoder(
__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_outputs[0]
UpperCAmelCase__ : Tuple = self.pooler(__UpperCamelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCamelCase , pooler_output=__UpperCamelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> List[Any]:
super().__init__(__UpperCamelCase )
UpperCAmelCase__ : str = config.num_labels
UpperCAmelCase__ : List[Any] = RegNetModel(__UpperCamelCase )
# classification head
UpperCAmelCase__ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , )-> ImageClassifierOutputWithNoAttention:
UpperCAmelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ : Any = self.regnet(__UpperCamelCase , output_hidden_states=__UpperCamelCase , return_dict=__UpperCamelCase )
UpperCAmelCase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase__ : Optional[Any] = self.classifier(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase__ : Optional[int] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase__ : str = "single_label_classification"
else:
UpperCAmelCase__ : Tuple = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCAmelCase__ : Tuple = MSELoss()
if self.num_labels == 1:
UpperCAmelCase__ : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase__ : int = loss_fct(__UpperCamelCase , __UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase__ : Optional[int] = CrossEntropyLoss()
UpperCAmelCase__ : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase__ : Optional[Any] = BCEWithLogitsLoss()
UpperCAmelCase__ : Dict = loss_fct(__UpperCamelCase , __UpperCamelCase )
if not return_dict:
UpperCAmelCase__ : Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCamelCase , logits=__UpperCamelCase , hidden_states=outputs.hidden_states )
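# Self-contained illustration of the problem_type dispatch implemented in the
# classification head above (toy tensors, independent of the model itself):
def _loss_for_problem_type(problem_type: str, logits: Tensor, labels: Tensor) -> Tensor:
    if problem_type == "regression":
        return MSELoss()(logits.squeeze(), labels.squeeze())
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, logits.shape[-1]), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels.float())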
| 714 |
"""simple docstring"""
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return the primes below max_number.

    >>> calculate_prime_numbers(10)
    [2, 3, 5, 7]
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the composite integers below max_number that have precisely two,
    not necessarily distinct, prime factors.

    >>> solution(30)
    10
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"""{solution() = }""")
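if __name__ == "__main__":
    # Worked check (added illustration): below 30 the semiprimes are
    # 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, which is ten in total.
    assert solution(30) == 10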
| 660 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Dict = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Any = size if size is not None else {"""shortest_edge""": 2_56}
lowercase__ : str = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowercase__ : Any = get_size_dict(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = do_resize
lowercase__ : str = size
lowercase__ : Any = resample
lowercase__ : Dict = do_center_crop
lowercase__ : Optional[int] = crop_size
lowercase__ : Optional[Any] = do_rescale
lowercase__ : List[Any] = rescale_factor
lowercase__ : Optional[int] = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        # Resize so that the shortest edge of the image matches size["shortest_edge"].
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        # Crop the central (size["height"], size["width"]) region of the image.
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        # Rescale pixel values by `scale` (typically 1/255).
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        # Normalize with the given per-channel mean and standard deviation.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        # Fall back to the instance defaults for any argument not explicitly overridden.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
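# A brief usage sketch for the preprocessing pipeline above. ViTImageProcessor in
# `transformers` exposes the same resize/rescale/normalize flow (assumption:
# transformers and NumPy are installed; the `size` keys differ per processor).
import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 224, "width": 224})
batch = processor.preprocess(np.zeros((64, 64, 3), dtype=np.uint8), return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224), channels-first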
| 12 |
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    # Return the static shape where it is known, falling back to dynamic dims.
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon keeps the op numerically well behaved without changing results.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    # Expands 1-dimensional tensors into 2-dimensional ones, matching torch/Keras conventions.
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
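

# A quick sanity check for the helpers above (runs inside this module with
# TensorFlow installed).
if __name__ == "__main__":
    x = tf.zeros((2, 3, 4))
    print(shape_list(x))  # [2, 3, 4]
    print(shape_list(flatten(x, start_dim=1)))  # [2, 12], like torch.flatten(x, 1)
    print(shape_list(expand_1d(tf.zeros(5))))  # [5, 1]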
| 541 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
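

# A minimal sketch (assumes `transformers` is installed): the defaults above
# describe a tiny 6-layer, 4-head model, and `attribute_map` lets the generic
# `hidden_size` attribute resolve to Whisper's `d_model`.
if __name__ == "__main__":
    config = WhisperConfig()
    assert config.hidden_size == config.d_model == 256
    assert config.encoder_layers == 6 and config.encoder_attention_heads == 4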
| 316 | import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        # Check BPE tokenization and token-to-id conversion against the toy vocab.
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 316 | 1 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given Gregorian date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is a leap year when divisible by 4, unless it is a century year
    # (centurian == 0) that is not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
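
# Worked example (date checked against a calendar): for 2000-01-01 the 2000s
# century anchor is Tuesday (2), the year offset keeps the doomsday on Tuesday,
# and January's leap-year doomsday date is the 4th, so the weekday index is
# (2 + 1 - 4) % 7 == 6, i.e. "Saturday".
assert get_week_day(2000, 1, 1) == "Saturday"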
| 289 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
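

# A usage sketch: with this hubconf at the repository root, torch.hub resolves
# the entry points defined above (the checkpoint name is illustrative and the
# call downloads weights on first use).
if __name__ == "__main__":
    import torch

    hub_tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
    hub_model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")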
| 76 | 0 |
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict:
    # Count occurrences of each uppercase letter in the message.
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    # Order the alphabet from the most to the least frequent letter in `message`,
    # breaking ties by reverse ETAOIN order.
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    # Score 0-12: how many of the six most and six least common English letters
    # also rank among the most/least frequent letters of the message.
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
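
# A quick sanity check (values follow directly from the functions above):
# "Hello world" contains three L's and two O's, so "L" leads the ordering.
assert get_letter_count("Hello world")["L"] == 3
assert get_frequency_order("Hello world")[0] == "L"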
| 655 |
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
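

# A sketch of how a concrete suite plugs into this mixin (IFPipeline is the
# real diffusers class; a runnable test would also define get_dummy_inputs):
#
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def get_dummy_components(self):
#           return self._get_dummy_components()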
| 655 | 1 |
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    # Drop the auxiliary segmentation head, which has no HF counterpart.
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
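
# Example invocation (a sketch: the script filename and local paths are
# placeholders for a downloaded MLCVNets checkpoint and its YAML config):
#
#   python convert_mobilevitv2_original_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf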
| 101 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
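
# A usage sketch of the lazy-module pattern above: names resolve through
# _LazyModule on first access, so torch, TF, or Flax are imported only when
# one of their model classes is actually requested.
#
#   from transformers.models.roberta import RobertaConfig   # cheap
#   from transformers.models.roberta import RobertaModel    # triggers the torch import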
| 602 | 0 |
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
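

# The scheduler-swap pattern exercised by these tests, in isolation (a sketch:
# any scheduler class that shares the config keys can be substituted):
#
#   pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#       "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#       provider="CPUExecutionProvider",
#   )
#   pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)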
| 1 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 1 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_lowerCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase : Dict = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class lowerCAmelCase__ ( _UpperCamelCase ):
'''simple docstring'''
lowercase_ = 4_2
class lowerCAmelCase__ ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
prior=lowerCamelCase_ , image_encoder=lowerCamelCase_ , image_processor=lowerCamelCase_ , scheduler=lowerCamelCase_ , renderer=lowerCamelCase_ , )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if latents is None:
__A =randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
__A =latents.to(lowerCamelCase_ )
__A =latents * scheduler.init_noise_sigma
return latents
def __UpperCamelCase ( self , lowercase__=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__A =torch.device(f'''cuda:{gpu_id}''' )
__A =[self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ , lowerCamelCase_ )
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowerCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt=1,
        num_inference_steps=25,
        generator=None,
        latents=None,
        guidance_scale=4.0,
        frame_size=64,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCamelCase_ )
__A =[]
for i, latent in enumerate(lowerCamelCase_ ):
print()
__A =self.renderer.decode(
latent[None, :] , lowerCamelCase_ , size=lowerCamelCase_ , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(lowerCamelCase_ )
__A =torch.stack(lowerCamelCase_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
__A =images.cpu().numpy()
if output_type == "pil":
__A =[self.numpy_to_pil(lowerCamelCase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCamelCase_ )
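# The denoising loop above combines an unconditional and a conditional prediction
# with classifier-free guidance: guided = uncond + guidance_scale * (cond - uncond).
# A minimal, self-contained sketch of that update rule on plain tensors
# (illustrative only; not part of the diffusers API):
import torch as _torch


def cfg_combine(noise_pred: "_torch.Tensor", guidance_scale: float) -> "_torch.Tensor":
    # `noise_pred` stacks the unconditional and conditional halves along dim 0,
    # mirroring the `torch.cat([latents] * 2)` batching in the loop above.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)


if __name__ == "__main__":
    stacked = _torch.randn(2, 4)  # toy "uncond + cond" batch
    print(cfg_combine(stacked, guidance_scale=4.0).shape)  # torch.Size([1, 4])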
| 184 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase: Dict = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
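# The `_import_structure` dict above drives transformers' `_LazyModule`, which
# defers heavy submodule imports until an attribute is first accessed. A minimal
# stand-alone sketch of the same idea via PEP 562 module-level __getattr__
# (the target module names here are arbitrary stand-ins, not transformers code):
import importlib

_lazy_targets = {"heavy_math": "math", "heavy_json": "json"}


def __getattr__(name):
    if name in _lazy_targets:
        # Import happens only on first access, then is cached by sys.modules.
        return importlib.import_module(_lazy_targets[name])
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")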
| 266 | 0 |
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
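# A small self-test for the introsort above (assumes the reconstructed name
# `sort`); the expected outputs are just sorted order, so they are safe to assert:
def _introsort_smoke_test() -> None:
    assert sort([4, 2, 6, 1]) == [1, 2, 4, 6]
    assert sort([]) == []
    assert sort([-1, -5, -3]) == [-5, -3, -1]
    assert sort([7]) == [7]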
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory,
        identifier=None,
        ignore_files=None,
        n_identifier=None,
        only_modules=True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
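# The class above runs doctests programmatically. A minimal stand-alone
# illustration of the same idea with doctest.testmod (toy example, unrelated
# to the transformers sources):
import doctest as _doctest


def _square(x):
    """
    >>> _square(3)
    9
    """
    return x * x


if __name__ == "__main__":
    results = _doctest.testmod(verbose=False)
    assert results.failed == 0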
| 676 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Any , _lowercase :Optional[Any] ):
'''simple docstring'''
lowercase__ = ObjectDetectionPipeline(model=_lowercase , image_processor=_lowercase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def UpperCAmelCase ( self :List[Any] , _lowercase :int , _lowercase :Any ):
'''simple docstring'''
lowercase__ = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
"score": ANY(_lowercase ),
"label": ANY(_lowercase ),
"box": {"xmin": ANY(_lowercase ), "ymin": ANY(_lowercase ), "xmax": ANY(_lowercase ), "ymax": ANY(_lowercase )},
} , )
import datasets
lowercase__ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowercase__ = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
lowercase__ = object_detector(_lowercase , threshold=0.0 )
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for outputs in batch_outputs:
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
"score": ANY(_lowercase ),
"label": ANY(_lowercase ),
"box": {"xmin": ANY(_lowercase ), "ymin": ANY(_lowercase ), "xmax": ANY(_lowercase ), "ymax": ANY(_lowercase )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
pass
@require_torch
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-detr-mobilenetsv3"
lowercase__ = AutoModelForObjectDetection.from_pretrained(_lowercase )
lowercase__ = AutoFeatureExtractor.from_pretrained(_lowercase )
lowercase__ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
lowercase__ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
] , )
lowercase__ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
],
] , )
@require_torch
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = "facebook/detr-resnet-50"
lowercase__ = AutoModelForObjectDetection.from_pretrained(_lowercase )
lowercase__ = AutoFeatureExtractor.from_pretrained(_lowercase )
lowercase__ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
lowercase__ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
lowercase__ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
] , )
@require_torch
@slow
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = "facebook/detr-resnet-50"
lowercase__ = pipeline("object-detection" , model=_lowercase )
lowercase__ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
lowercase__ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
] , )
@require_torch
@slow
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ = 0.9985
lowercase__ = "facebook/detr-resnet-50"
lowercase__ = pipeline("object-detection" , model=_lowercase )
lowercase__ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=_lowercase )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = "Narsil/layoutlmv3-finetuned-funsd"
lowercase__ = 0.9993
lowercase__ = pipeline("object-detection" , model=_lowercase , threshold=_lowercase )
lowercase__ = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
] , )
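# Condensed usage sketch of the object-detection pipeline exercised by the
# tests above (mirrors the test calls; exact scores and boxes depend on the
# checkpoint, so none are asserted here):
from transformers import pipeline as _pipeline


def _detr_demo() -> None:
    detector = _pipeline("object-detection", model="facebook/detr-resnet-50")
    outputs = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    for obj in outputs:
        print(obj["label"], round(obj["score"], 4), obj["box"])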
| 655 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
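# Round-trip sanity check for the pure-Python codec above, cross-validated
# against the standard library (assumes the reconstructed names
# `base64_encode` / `base64_decode`):
import base64 as _stdlib_base64


def _base64_roundtrip_demo() -> None:
    payload = b"Hello World!"
    encoded = base64_encode(payload)
    assert encoded == _stdlib_base64.b64encode(payload)
    assert base64_decode(encoded) == payload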
| 655 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 714 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 638 | 0 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
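# The routine above sums Project Euler's "semidivisible" numbers: with lps(n)
# the largest prime <= sqrt(n) and ups(n) the smallest prime >= sqrt(n), n is
# semidivisible when exactly one of lps(n), ups(n) divides n. A slow brute-force
# cross-check for small limits (my own sketch, not part of the original file):
def is_semidivisible(n: int, primes: list) -> bool:
    root = math.sqrt(n)
    lps = max(p for p in primes if p <= root)
    ups = min(p for p in primes if p >= root)
    return (n % lps == 0) != (n % ups == 0)


def brute_force_sum(limit: int) -> int:
    # Sieve a little past sqrt(limit) so ups(n) is always available.
    primes = prime_sieve(int(math.sqrt(limit)) + 100)
    return sum(n for n in range(4, limit + 1) if is_semidivisible(n, primes))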
if __name__ == "__main__":
print(solution())
| 84 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 84 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
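# Convergence check for trapezoidal_area above: the exact integral of x**2 over
# [0, 1] is 1/3, and with 1000 steps the trapezoid error is far below 1e-4:
def _trapezoid_accuracy_demo() -> None:
    approx = trapezoidal_area(lambda x: x * x, 0, 1, 1_000)
    assert abs(approx - 1 / 3) < 1e-4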
| 700 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
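# Minimal usage sketch for the reconstructed classes above (assumes the
# standard OnnxConfig constructor that takes the model config):
if __name__ == "__main__":
    vit_config = ViTConfig(image_size=384)
    assert vit_config.patch_size == 16
    onnx_config = ViTOnnxConfig(vit_config)
    assert "pixel_values" in onnx_config.inputs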
| 313 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
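# Quick numeric checks for the activations above: sigmoid(0) = 0.5 and
# SiLU(0) = 0; SiLU is bounded elementwise by |x|:
def _silu_demo() -> None:
    v = np.array([-1.0, 0.0, 1.0])
    assert np.isclose(sigmoid(np.array([0.0])), 0.5).all()
    assert np.isclose(sigmoid_linear_unit(np.array([0.0])), 0.0).all()
    assert (sigmoid_linear_unit(v) <= np.abs(v)).all()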
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 256 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
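# The scheduler above always dispatches the process with the highest response
# ratio (waiting_time + burst_time) / burst_time. Tiny worked example: after
# waiting 6 units with a burst of 2, the ratio is (6 + 2) / 2 = 4.0:
def _response_ratio(waiting_time: float, burst_time: float) -> float:
    return (waiting_time + burst_time) / burst_time


assert _response_ratio(6, 2) == 4.0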
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
    print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 256 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 160 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
def UpperCAmelCase_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def UpperCAmelCase_ ( self ) -> Any:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def UpperCAmelCase_ ( self ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,__UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase__ : Dict = MaskFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Any = (self.model_tester.min_size,) * 2
lowerCAmelCase__ : Union[str, Any] = {
"""pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ),
"""class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(),
}
lowerCAmelCase__ : Optional[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self ) -> Any:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ : Optional[int] = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : str = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ : Optional[Any] = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : int = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowerCAmelCase__ : int = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
lowerCAmelCase__ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase__ : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowerCAmelCase__ : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase__ : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCAmelCase = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : str = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self.default_image_processor
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : Any = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : Any = model(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
lowerCAmelCase__ : List[Any] = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
lowerCAmelCase__ : Tuple = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : int = self.default_image_processor
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(**__UpperCAmelCase )
# masks_queries_logits
lowerCAmelCase__ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowerCAmelCase__ : Optional[int] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
lowerCAmelCase__ : Dict = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
# class_queries_logits
lowerCAmelCase__ : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : Optional[int] = prepare_img()
lowerCAmelCase__ : List[str] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(**__UpperCAmelCase )
# masks_queries_logits
lowerCAmelCase__ : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowerCAmelCase__ : str = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
lowerCAmelCase__ : Union[str, Any] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
# class_queries_logits
lowerCAmelCase__ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ : List[str] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : List[str] = self.default_image_processor
lowerCAmelCase__ : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase )
lowerCAmelCase__ : int = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]]
lowerCAmelCase__ : int = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 160 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for attribute in key.split('''.''' ):
A_ : Tuple = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
A_ : int = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
A_ : str = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Union[str, Any] = value
elif weight_type == "weight_v":
A_ : Tuple = value
elif weight_type == "bias":
A_ : int = value
else:
A_ : Any = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
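# A generic equivalent of the dotted-path attribute traversal used above
# (illustrative only, not part of the conversion script):
from functools import reduce

def get_by_path(obj, path):
    # e.g. get_by_path(model, "encoder.layer_norm") == model.encoder.layer_norm
    return reduce(getattr, path.split("."), obj)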
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = []
A_ : Tuple = fairseq_model.state_dict()
A_ : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
A_ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
A_ : Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
A_ : Dict = True
if "*" in mapped_key:
A_ : Any = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
A_ : Optional[Any] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
A_ : List[str] = '''weight_g'''
elif "weight_v" in name:
A_ : Any = '''weight_v'''
elif "weight" in name:
A_ : List[Any] = '''weight'''
elif "bias" in name:
A_ : Optional[Any] = '''bias'''
else:
A_ : Dict = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
A_ : Union[str, Any] = name.split('''.''' )
A_ : Optional[int] = int(items[0] )
A_ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ : str = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ : Optional[int] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ : str = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
if config_path is not None:
A_ : List[str] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
A_ : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
A_ : Union[str, Any] = Dictionary.load(SCREAMING_SNAKE_CASE )
            # important: change the bos & pad token ids, since the CTC symbol is
            # <pad> and not <s> as in fairseq
A_ : Any = target_dict.pad_index
A_ : int = target_dict.bos_index
A_ : List[Any] = target_dict.eos_index
A_ : Union[str, Any] = len(target_dict.symbols )
A_ : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
A_ : Optional[int] = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , )
A_ : Any = True if config.feat_extract_norm == '''layer''' else False
A_ : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
A_ : List[Any] = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A_ : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
A_ : Union[str, Any] = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
A_ , A_ , A_ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
A_ , A_ , A_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A_ : str = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
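    # Example invocation (all paths are illustrative placeholders):
    # python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
    #     --checkpoint_path /path/to/hubert.pt \
    #     --dict_path /path/to/dict.ltr.txt \
    #     --pytorch_dump_folder_path ./hubert-converted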
| 590 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , _SCREAMING_SNAKE_CASE , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
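# A minimal sketch of the same deprecation-shim idiom with generic names
# (OldProcessor/NewProcessor are hypothetical, not classes from this repo):
# subclass the maintained class and emit a FutureWarning on construction.
import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)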
| 590 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class __a ( _snake_case ):
def __init__( self : Dict , *lowercase__ : Any , **lowercase__ : Tuple) ->None:
"""simple docstring"""
warnings.warn(
"""The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PerceiverImageProcessor instead.""" , lowercase__ , )
super().__init__(*lowercase__ , **lowercase__)
| 572 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
_lowerCamelCase = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE : Tuple = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Tuple = DistilBertTokenizer
def __init__( self : Tuple , lowercase__ : Tuple=None , lowercase__ : Dict=None , lowercase__ : Dict=True , lowercase__ : Tuple="[UNK]" , lowercase__ : Optional[int]="[SEP]" , lowercase__ : Union[str, Any]="[PAD]" , lowercase__ : Optional[Any]="[CLS]" , lowercase__ : Dict="[MASK]" , lowercase__ : int=True , lowercase__ : List[Any]=None , **lowercase__ : Tuple , ) ->Optional[int]:
"""simple docstring"""
super().__init__(
lowercase__ , tokenizer_file=lowercase__ , do_lower_case=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , tokenize_chinese_chars=lowercase__ , strip_accents=lowercase__ , **lowercase__ , )
_lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , lowercase__) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase__) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase__) != tokenize_chinese_chars
):
_lowercase = getattr(lowercase__ , normalizer_state.pop("""type"""))
_lowercase = do_lower_case
_lowercase = strip_accents
_lowercase = tokenize_chinese_chars
_lowercase = normalizer_class(**lowercase__)
_lowercase = do_lower_case
def _UpperCAmelCase ( self : Dict , lowercase__ : Optional[int] , lowercase__ : List[str]=None) ->Union[str, Any]:
"""simple docstring"""
_lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
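        # For a single sequence this builds:  [CLS] A [SEP]
        # For a pair of sequences it builds:  [CLS] A [SEP] B [SEP]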
def _UpperCAmelCase ( self : Tuple , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None) ->List[int]:
"""simple docstring"""
_lowercase = [self.sep_token_id]
_lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
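        # The resulting token_type_ids mark the segments: 0 for "[CLS] A [SEP]"
        # and 1 for the trailing "B [SEP]" when a second sequence is given.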
def _UpperCAmelCase ( self : Dict , lowercase__ : str , lowercase__ : Optional[str] = None) ->Tuple[str]:
"""simple docstring"""
_lowercase = self._tokenizer.model.save(lowercase__ , name=lowercase__)
return tuple(lowercase__)
| 572 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class __lowercase ( unittest.TestCase ):
def __init__( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : int = 3_2 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_5_5 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , __lowerCamelCase : bool = True , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : List[str]=3_0 , __lowerCamelCase : str=4_0_0 , __lowerCamelCase : Any=3 , ) -> Any:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = do_resize
UpperCAmelCase = size if size is not None else {"""shortest_edge""": 2_8_8}
UpperCAmelCase = size_divisor
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = do_center_crop
UpperCAmelCase = image_mean
UpperCAmelCase = image_std
UpperCAmelCase = do_pad
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _lowercase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=False ) -> int:
"""simple docstring"""
if not batched:
UpperCAmelCase = self.size["""shortest_edge"""]
UpperCAmelCase = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
UpperCAmelCase , UpperCAmelCase = image.size
else:
UpperCAmelCase , UpperCAmelCase = image.shape[1], image.shape[2]
UpperCAmelCase = size / min(__lowerCamelCase , __lowerCamelCase )
if h < w:
UpperCAmelCase , UpperCAmelCase = size, scale * w
else:
UpperCAmelCase , UpperCAmelCase = scale * h, size
UpperCAmelCase = int((1_3_3_3 / 8_0_0) * size )
if max(__lowerCamelCase , __lowerCamelCase ) > max_size:
UpperCAmelCase = max_size / max(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase = newh * scale
UpperCAmelCase = neww * scale
UpperCAmelCase , UpperCAmelCase = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase , UpperCAmelCase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
UpperCAmelCase = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
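# The expected-size computation above mirrors the processor's resizing rule
# (inferred from the test logic): scale so the shortest edge matches `size`,
# cap the longest edge at int(1333 / 800 * size), then floor both dimensions
# to multiples of `size_divisor`.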
@require_torch
@require_vision
class __lowercase ( __snake_case , unittest.TestCase ):
UpperCamelCase = BridgeTowerImageProcessor if is_vision_available() else None
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = BridgeTowerImageProcessingTester(self )
@property
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size_divisor""" ) )
def _lowercase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 377 |
class Graph :
    def __init__( self ) -> None:
        """simple docstring"""
        self.vertex = {}
    def print_graph( self ) -> None:
        """simple docstring"""
        print(self.vertex )
        for i in self.vertex:
            print(i , """ -> """ , """ -> """.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex : int , to_vertex : int ) -> None:
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ) -> None:
        """simple docstring"""
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex : int , visited : list ) -> None:
        """simple docstring"""
        visited[start_vertex] = True
        print(start_vertex , end=""" """ )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
__a = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 377 | 1 |
def _a ( arr : list[int] , required_sum : int ):
    """simple docstring"""
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero (0) can be formed by not taking any element, hence True
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # a non-zero sum cannot be formed from an empty set, hence False
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
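# Quick sanity check (illustrative values): 4 + 5 == 9, so a subset summing
# to 9 exists, while no subset of the list sums to 30.
assert _a([3, 34, 4, 12, 5, 2], 9)
assert not _a([3, 34, 4, 12, 5, 2], 30)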
if __name__ == "__main__":
import doctest
doctest.testmod()
| 585 |
import math
from numpy import inf
from scipy.integrate import quad
def _a ( num : float ):
    """simple docstring"""
    if num <= 0:
        raise ValueError('math domain error' )
    return quad(_integrand , 0 , inf , args=(num,) )[0]
def _integrand ( x : float , z : float ):
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
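# Quick check against a known value (illustrative): Γ(5) = 4! = 24.
assert math.isclose(_a(5), 24.0, rel_tol=1e-6)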
if __name__ == "__main__":
from doctest import testmod
testmod()
| 585 | 1 |
import re
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , str_ )]
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = split_input(str_ )
return "".join(
[''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
lowercase__ = split_input(SCREAMING_SNAKE_CASE )
if upper:
lowercase__ = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
lowercase__ = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return to_simple_case(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
lowercase__ = to_simple_case(SCREAMING_SNAKE_CASE )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return to_complex_case(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''_''' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return to_complex_case(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''-''' )
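# Illustrative behavior of the converters above (hypothetical call names,
# since every function here was renamed to `_a` and the later definitions
# shadow the earlier ones):
#   pascal case:              "one two 3" -> "OneTwo3"
#   snake case (upper=False): "one two 3" -> "one_two_3"
#   kebab case (upper=True):  "one two 3" -> "ONE-TWO-3"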
if __name__ == "__main__":
__import__('doctest').testmod()
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case__ : Union[str, Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : str = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
snake_case__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
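    # With this pattern, importing the package stays cheap: the torch-dependent
    # submodules listed in _import_structure are only loaded on first attribute
    # access through the _LazyModule proxy.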
| 408 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class UpperCAmelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = data
def __iter__( self ) -> Any:
'''simple docstring'''
for element in self.data:
yield element
def _UpperCamelCase ( __UpperCamelCase=True ) -> str:
lowerCamelCase_ = Accelerator(even_batches=_A )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = False ) -> List[str]:
if iterable:
lowerCamelCase_ = DummyIterableDataset(torch.as_tensor(range(_A ) ) )
else:
lowerCamelCase_ = TensorDataset(torch.as_tensor(range(_A ) ) )
lowerCamelCase_ = DataLoader(_A ,batch_size=_A )
lowerCamelCase_ = accelerator.prepare(_A )
return dl
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> Any:
lowerCamelCase_ = create_dataloader(accelerator=_A ,dataset_size=_A ,batch_size=_A )
lowerCamelCase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_A ,dataset_size=3 ,batch_size=1 ,process_0_expected_batch_sizes=[1, 1] ,process_1_expected_batch_sizes=[1, 1] ,)
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_A ,dataset_size=7 ,batch_size=2 ,process_0_expected_batch_sizes=[2, 2] ,process_1_expected_batch_sizes=[2, 2] ,)
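    # With even_batches enabled (the default), the sampler wraps around and
    # reuses initial samples so both processes see the same number of equally
    # sized batches: 3 samples are padded to 4 (2 per process) and 7 to 8.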
def _UpperCamelCase ( ) -> List[str]:
lowerCamelCase_ = create_accelerator(even_batches=_A )
verify_dataloader_batch_sizes(
_A ,dataset_size=3 ,batch_size=1 ,process_0_expected_batch_sizes=[1, 1] ,process_1_expected_batch_sizes=[1] ,)
verify_dataloader_batch_sizes(
_A ,dataset_size=7 ,batch_size=2 ,process_0_expected_batch_sizes=[2, 2] ,process_1_expected_batch_sizes=[2, 1] ,)
def _UpperCamelCase ( ) -> Dict:
lowerCamelCase_ = create_accelerator(even_batches=_A )
lowerCamelCase_ = torch.nn.Linear(1 ,1 )
lowerCamelCase_ = accelerator.prepare(_A )
lowerCamelCase_ = create_dataloader(_A ,dataset_size=3 ,batch_size=1 )
lowerCamelCase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(_A ):
lowerCamelCase_ = ddp_model(batch[0].float() )
lowerCamelCase_ = output.sum()
loss.backward()
batch_idxs.append(_A )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _UpperCamelCase ( __UpperCamelCase ) -> Any:
with warnings.catch_warnings(record=_A ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category ,_A )
assert "only supported for multi-GPU" in str(w[-1].message )
def _UpperCamelCase ( ) -> Dict:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = create_accelerator(even_batches=_A )
lowerCamelCase_ = torch.nn.Linear(1 ,1 )
lowerCamelCase_ = accelerator.prepare(_A )
lowerCamelCase_ = create_dataloader(_A ,dataset_size=3 ,batch_size=1 )
lowerCamelCase_ = create_dataloader(_A ,dataset_size=3 ,batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] ,even_batches=_A ):
lowerCamelCase_ = train_dl.batch_sampler.even_batches
lowerCamelCase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _UpperCamelCase ( ) -> int:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = create_accelerator(even_batches=_A )
lowerCamelCase_ = torch.nn.Linear(1 ,1 )
lowerCamelCase_ = accelerator.prepare(_A )
create_dataloader(_A ,dataset_size=3 ,batch_size=1 ,iterable=_A )
lowerCamelCase_ = create_dataloader(_A ,dataset_size=3 ,batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] ,even_batches=_A ):
lowerCamelCase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = create_accelerator()
lowerCamelCase_ = torch.nn.Linear(1 ,1 )
lowerCamelCase_ = accelerator.prepare(_A )
create_dataloader(_A ,dataset_size=3 ,batch_size=1 ,iterable=_A )
with warnings.catch_warnings(record=_A ) as w:
with accelerator.join_uneven_inputs([ddp_model] ,even_batches=_A ):
pass
assert issubclass(w[-1].category ,_A )
assert "only supported for map-style datasets" in str(w[-1].message )
def _UpperCamelCase ( ) -> Optional[int]:
lowerCamelCase_ = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
lowerCamelCase_ = accelerator.state.distributed_type
lowerCamelCase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_A )
lowerCamelCase_ = original_state
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def UpperCamelCase( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
pass
def hashimage ( image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable ( mask ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
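# Hashing the raw mask bytes keeps the expected outputs below short while
# still detecting any pixel-level change in the predicted masks.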
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE_ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
        image_segmenter = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs['masks'] ):
            new_outputs += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_outputs , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' , model=model_id )
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_outputs = []
        for i, o in enumerate(outputs['masks'] ):
            new_outputs += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outputs , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_053},
] , )
| 384 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module ( __snake_case ) -> bool:
    """simple docstring"""
    if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(torch, '''_dynamo''' ):
        return False
    return isinstance(__snake_case, torch._dynamo.eval_frame.OptimizedModule )
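# torch.compile (PyTorch >= 2.0) wraps a module in an OptimizedModule and
# keeps the unwrapped original on `_orig_mod`, which the unwrapping logic
# below relies on.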
def lowerCamelCase__ ( __snake_case, __snake_case = True ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
_UpperCamelCase = is_compiled_module(__snake_case )
if is_compiled:
_UpperCamelCase = model
_UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__snake_case, __snake_case ):
_UpperCamelCase = model.module
if not keep_fpaa_wrapper:
_UpperCamelCase = getattr(__snake_case, '''forward''' )
_UpperCamelCase = model.__dict__.pop('''_original_forward''', __snake_case )
if original_forward is not None:
while hasattr(__snake_case, '''__wrapped__''' ):
_UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
_UpperCamelCase = forward
if getattr(__snake_case, '''_converted_to_transformer_engine''', __snake_case ):
convert_model(__snake_case, to_transformer_engine=__snake_case )
if is_compiled:
_UpperCamelCase = model
_UpperCamelCase = compiled_model
return model
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
PartialState().wait_for_everyone()
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__snake_case, __snake_case )
elif PartialState().local_process_index == 0:
torch.save(__snake_case, __snake_case )
@contextmanager
def lowerCamelCase__ ( **__snake_case ) -> Tuple:
"""simple docstring"""
for key, value in kwargs.items():
_UpperCamelCase = str(__snake_case )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if not hasattr(__snake_case, '''__qualname__''' ) and not hasattr(__snake_case, '''__name__''' ):
_UpperCamelCase = getattr(__snake_case, '''__class__''', __snake_case )
if hasattr(__snake_case, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__snake_case, '''__name__''' ):
return obj.__name__
return str(__snake_case )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any:
"""simple docstring"""
for key, value in source.items():
if isinstance(__snake_case, __snake_case ):
_UpperCamelCase = destination.setdefault(__snake_case, {} )
merge_dicts(__snake_case, __snake_case )
else:
_UpperCamelCase = value
return destination
def is_port_in_use ( port = None ) -> bool:
    """simple docstring"""
    if port is None:
        port = 2_95_00
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
        return s.connect_ex(('''localhost''', port) ) == 0
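# connect_ex returns 0 when the connection succeeds, i.e. when something is
# already listening on the port, so 0 here means "in use".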
| 19 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'align_text_model'
def __init__( self , __lowerCamelCase=3_0_5_2_2 , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=1E-12 , __lowerCamelCase=0 , __lowerCamelCase="absolute" , __lowerCamelCase=True , **__lowerCamelCase , ) -> Optional[int]:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = vocab_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : Any = intermediate_size
_SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Dict = type_vocab_size
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type
_SCREAMING_SNAKE_CASE : Any = use_cache
_SCREAMING_SNAKE_CASE : str = pad_token_id
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
_SCREAMING_SNAKE_CASE : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'align_vision_model'
def __init__( self , __lowerCamelCase = 3 , __lowerCamelCase = 6_0_0 , __lowerCamelCase = 2.0 , __lowerCamelCase = 3.1 , __lowerCamelCase = 8 , __lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , __lowerCamelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __lowerCamelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __lowerCamelCase = [] , __lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , __lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , __lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , __lowerCamelCase = 0.25 , __lowerCamelCase = "swish" , __lowerCamelCase = 2_5_6_0 , __lowerCamelCase = "mean" , __lowerCamelCase = 0.02 , __lowerCamelCase = 0.001 , __lowerCamelCase = 0.99 , __lowerCamelCase = 0.2 , **__lowerCamelCase , ) -> Dict:
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = num_channels
_SCREAMING_SNAKE_CASE : Tuple = image_size
_SCREAMING_SNAKE_CASE : Tuple = width_coefficient
_SCREAMING_SNAKE_CASE : str = depth_coefficient
_SCREAMING_SNAKE_CASE : int = depth_divisor
_SCREAMING_SNAKE_CASE : Union[str, Any] = kernel_sizes
_SCREAMING_SNAKE_CASE : Tuple = in_channels
_SCREAMING_SNAKE_CASE : int = out_channels
_SCREAMING_SNAKE_CASE : Optional[Any] = depthwise_padding
_SCREAMING_SNAKE_CASE : List[str] = strides
_SCREAMING_SNAKE_CASE : Any = num_block_repeats
_SCREAMING_SNAKE_CASE : List[str] = expand_ratios
_SCREAMING_SNAKE_CASE : List[Any] = squeeze_expansion_ratio
_SCREAMING_SNAKE_CASE : List[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dim
_SCREAMING_SNAKE_CASE : List[Any] = pooling_type
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[str] = batch_norm_eps
_SCREAMING_SNAKE_CASE : List[str] = batch_norm_momentum
_SCREAMING_SNAKE_CASE : Any = drop_connect_rate
_SCREAMING_SNAKE_CASE : Optional[int] = sum(__lowerCamelCase ) * 4
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
_SCREAMING_SNAKE_CASE : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 'align'
__snake_case = True
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=6_4_0 , __lowerCamelCase=1.0 , __lowerCamelCase=0.02 , **__lowerCamelCase , ) -> Optional[int]:
super().__init__(**__lowerCamelCase )
if text_config is None:
_SCREAMING_SNAKE_CASE : List[Any] = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
_SCREAMING_SNAKE_CASE : Union[str, Any] = AlignTextConfig(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = AlignVisionConfig(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = projection_dim
_SCREAMING_SNAKE_CASE : Any = temperature_init_value
_SCREAMING_SNAKE_CASE : int = initializer_range
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) -> List[str]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCamelCase )
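        # Illustrative usage (assuming the original Align class names):
        #   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())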
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE : Optional[int] = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE : int = self.__class__.model_type
        return output
| 249 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __lowerCAmelCase ( _UpperCAmelCase):
_a = '''vivit'''
    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
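

# Illustrative sketch (not part of the original module): the token count implied by the
# default tubelet geometry above.
if __name__ == "__main__":
    config = VivitConfig()
    frames_per_tube, patch_height, patch_width = config.tubelet_size
    # (32 / 2) temporal tubes * (224 / 16) * (224 / 16) spatial patches = 3136 tokens
    num_tokens = (
        (config.num_frames // frames_per_tube)
        * (config.image_size // patch_height)
        * (config.image_size // patch_width)
    )
    assert num_tokens == 3136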
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reused(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides the default device."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on all prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work; the dummy list passes through unprepared
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertFalse(
            getattr(dummy_obj, "_is_accelerate_prepared", False), "Dummy object should not be marked prepared"
        )
        for name, obj in [
            ("Model", model),
            ("Optimizer", optimizer),
            ("Scheduler", scheduler),
            ("Train Dataloader", train_dl),
            ("Valid Dataloader", valid_dl),
        ]:
            self.assertTrue(
                getattr(obj, "_is_accelerate_prepared", False),
                f"{name} is missing `_is_accelerate_prepared` or is set to `False`",
            )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with the BNB library."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that a ValueError is raised when a quantized model is partially offloaded to the CPU."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device(self):
        """Tests that a ValueError is raised when a quantized model is spread across several GPUs in distributed mode."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device_no_distributed(self):
        """Tests that a quantized model spread across several devices prepares fine outside distributed mode."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to(\"cuda\")\n\n    >>> prompt = \"A red cartoon frog, 4k\"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to(\"cuda\")\n\n    >>> init_image = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/frog.png\"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save(\"red_frog.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
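

# Worked example (not part of the original module): for height = width = 768 and the
# default scale_factor = 8, 768 // 8**2 = 12 with no remainder, so the function
# returns (96, 96) -- the latent grid size scaled back up by the movq factor.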
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]): MoVQ image encoder and decoder.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
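
    # Worked example (not part of the original module): with num_inference_steps = 100
    # and strength = 0.3, init_timestep = min(30, 100) = 30 and t_start = 70, so only
    # the last 30 scheduler timesteps are run -- higher strength means more denoising.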
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. Submodules are loaded on
        GPU only when their `forward` method is called.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, one whole model at a time is moved to the GPU when its `forward` method is
        called, and it stays there until the next model runs.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    """Generate a random hand, another random hand, and the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random hand pairs with their expected results."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    """Problem 54 from Project Euler: count the hands that Player 1 wins."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
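

# Illustrative sketch (not part of the original file): direct use of the PokerHand API
# exercised by the tests above. Both hands come from SORTED_HANDS.
if __name__ == "__main__":
    straight_flush = PokerHand("2H 3H 4H 5H 6H")
    ace_high = PokerHand("KD 6S 9D TH AD")
    assert straight_flush.compare_with(ace_high) == "Win"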
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
            Hidden states conditioned on the `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer model for video-like data; attention is applied across the frame axis."""

    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
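

# Illustrative sketch (not part of the original module): shape round-trip through the
# temporal transformer. All dimensions below are arbitrary example values.
if __name__ == "__main__":
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=8)
    frames = torch.randn(2 * 4, 16, 8, 8)  # (batch * num_frames, channels, height, width)
    out = model(frames, num_frames=4).sample
    assert out.shape == frames.shape  # the residual connection preserves the input shape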
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a nested list, one inner list per row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
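

# Worked example (not part of the original file): generate_pascal_triangle(4) builds
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]; each inner value is the sum of the two
# values above it, as computed by the two helpers below.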
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Faster variant that builds each row from the previous one and exploits symmetry."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
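

# Worked example (not part of the original file): for row_index = 4 the loop pads the
# previous row [1, 3, 3, 1] to [0, 1, 3, 3, 1, 0], sums adjacent pairs to get the
# first half [1, 4, 6], then mirrors its first two entries to obtain [1, 4, 6, 4, 1].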
def benchmark() -> None:
    """Benchmark both triangle generators over a range of inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
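
# Illustrative sketch (not part of the original module): shard naming produced by the
# helpers above, assuming a dataset called "MyDataset".
if __name__ == "__main__":
    assert camelcase_to_snakecase("MyDataset") == "my_dataset"
    assert filename_prefix_for_split("MyDataset", "train") == "my_dataset-train"
    print(filenames_for_dataset_split("/data", "MyDataset", "train", "arrow", shard_lengths=[100, 100]))
    # ['/data/my_dataset-train-00000-of-00002.arrow', '/data/my_dataset-train-00001-of-00002.arrow']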
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
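
# Illustrative note (not part of the original module): the lazy module above defers the
# heavyweight tokenizer import until first attribute access, assuming sentencepiece is
# installed:
#
#     from transformers import MLukeTokenizer          # resolved through the lazy module
#     tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")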
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Any = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Optional[int] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Optional[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : str = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
def A__ ( *_UpperCAmelCase : str , **_UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : int , **_UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
def A__ ( *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase , ["torch"] )
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Any = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Any = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : int = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Optional[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : int = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : int = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : str = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Optional[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : str = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : str = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Any = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Optional[int] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : int = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Any = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Dict = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[str] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : List[Any] = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> str:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Any:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : int = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> int:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"])
class SCREAMING_SNAKE_CASE_ ( metaclass=_lowercase):
'''simple docstring'''
__magic_name__ : Tuple = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__) -> Tuple:
'''simple docstring'''
requires_backends(self , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"])
@classmethod
def UpperCAmelCase ( cls , *lowerCamelCase__ , **lowerCamelCase__) -> Dict:
'''simple docstring'''
requires_backends(cls , ["torch"])
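# All of the placeholder classes above (their real names are masked in this dump) follow
# the same dummy-backend pattern: every constructor and factory classmethod defers to
# requires_backends, which raises when torch is unavailable. A minimal sketch of that
# helper, assuming it simply checks importability:
#
#     import importlib.util
#
#     def requires_backends(obj, backends):
#         if any(importlib.util.find_spec(b) is None for b in backends):
#             name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
#             raise ImportError(f"{name} requires the following backends: {backends}")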
| 150 | 1 |
import math
def insertion_sort(array, start=0, end=0):
    """Sort array[start:end] in place with insertion sort and return it."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    """Sift array[index] down so the subtree rooted there is a max heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort the array in place with heap sort and return it."""
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three indexed values (used for pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of array[low:high] around pivot; returns the split point."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Introsort: quicksort with heap-sort and insertion-sort fallbacks."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive worker for sort(): quicksort until max_depth reaches zero."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
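# Quick sanity check for the reconstructed implementation above:
#
#     >>> sort([4, 2, 6, 8, 1])
#     [1, 2, 4, 6, 8]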
| 197 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
__magic_name__ :Union[str, Any] = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
def __lowercase ( snake_case ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
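# Worked example: applying the rules above to the checkpoint key
# "visual.transformer.resblocks.0.ln_1.weight" yields
# "vision_model.encoder.layers.0.layer_norm1.weight".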
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
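# The slicing above splits each fused attn.in_proj matrix of 3 * dim rows into three
# equal chunks (query rows [:dim], key rows [dim : 2 * dim], value rows [-dim:]),
# and the same split is applied to the 1-D bias vectors.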
def __lowercase ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=False ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
    missing_keys, unexpected_keys = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ :Tuple = model(**snake_case )
# Verify outputs
__magic_name__ :Any = outputs.logits_per_video
__magic_name__ :str = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
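# Example invocation (script filename and output path below are illustrative):
#   python convert_xclip_checkpoint.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 --push_to_hub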
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def __lowerCAmelCase ( key: str ):  # single-key registration decorator; original name masked in this dump
    """Marks a function as the handler for one key."""

    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += [key]
        setattr(func , "handle_key" , handle )
        return func

    return decorator
def __lowerCAmelCase ( *keys ):  # multi-key registration decorator; original name masked in this dump
    """Marks a function as the handler for several keys."""

    def decorator(func ):
        handle = getattr(func , "handle_key" , [] )
        handle += keys
        setattr(func , "handle_key" , handle )
        return func

    return decorator
class KeyHandler(type ):
    """Metaclass that collects key-marked methods into a class-level dispatch table."""

    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , "key_handler" ):
            setattr(new_cls , "key_handler" , {} )
        setattr(new_cls , "handle_input" , KeyHandler.handle_input )

        for value in attrs.values():
            handled_keys = getattr(value , "handle_key" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """Reads one key press and dispatches it to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char  # attribute name assumed; records the last pressed key
            return handler(cls )
        else:
            return None
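    # Minimal usage sketch (class, method and key names below are illustrative):
    #
    #     class Menu(metaclass=KeyHandler):
    #         @<first decorator above>("q")
    #         def quit(cls):
    #             return "quit"
    #
    #     Menu.handle_input(Menu)  # returns "quit" after a "q" key press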
def __lowerCAmelCase ( cls : List[str] ):
'''simple docstring'''
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 719 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowerCAmelCase ( args ):  # factory hooked into the CLI parser; original function name masked
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand(BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser ):  # method name assumed from the transformers CLI convention
        download_parser = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=UpperCamelCase , default=UpperCamelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=UpperCamelCase , help="Name of the model to download" )
download_parser.set_defaults(func=UpperCamelCase )
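        # Example: `transformers-cli download bert-base-uncased --cache-dir /tmp/models`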
    def __init__( self , model , cache , force , trust_remote_code ):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):  # method name assumed from the CLI command interface
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 39 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False  # attribute names after tokenizer_class are assumed from the test mixin
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
def lowerCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
        vocab_tokens = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
lowerCAmelCase_ = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCAmelCase ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """Returns a raw input string and its expected normalized decoding."""
        input_text = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
        output_text = '''こんにちは、世界。 \nこんばんは、世界。😀'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):  # helper name assumed from the shared tokenizer test mixin
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def lowerCamelCase_ ( self ):
'''simple docstring'''
pass # TODO add if relevant
def lowerCamelCase_ ( self ):
'''simple docstring'''
pass # TODO add if relevant
def lowerCamelCase_ ( self ):
'''simple docstring'''
pass # TODO add if relevant
def lowerCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、世界。 こんばんは、㔺界。'''
lowerCAmelCase_ = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
lowerCAmelCase_ = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids without special tokens
lowerCAmelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids with special tokens
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
lowerCAmelCase_ = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
lowerCAmelCase_ = tokenizer.encode(UpperCAmelCase )
lowerCAmelCase_ = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、世界。'''
lowerCAmelCase_ = '''こんばんは、㔺界。😀'''
lowerCAmelCase_ = '''こんにちは、世界。こんばんは、世界。😀'''
lowerCAmelCase_ = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
lowerCAmelCase_ = tokenizer.encode(UpperCAmelCase , prefix_text=UpperCAmelCase )
lowerCAmelCase_ = tokenizer.decode(UpperCAmelCase )
lowerCAmelCase_ = tokenizer.decode(UpperCAmelCase )
lowerCAmelCase_ = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowerCAmelCase_ = '''こんにちは、世界。'''
lowerCAmelCase_ = '''こんばんは、㔺界。😀'''
lowerCAmelCase_ = len(tokenizer.encode(UpperCAmelCase ) ) - 2
lowerCAmelCase_ = len(tokenizer.encode(UpperCAmelCase ) ) - 2
lowerCAmelCase_ = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ = tokenizer(UpperCAmelCase , prefix_text=UpperCAmelCase ).token_type_ids
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase_ = tokenizer.encode('''あンいワ''' )
lowerCAmelCase_ = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
lowerCAmelCase_ = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCAmelCase ) , tokenizer.decode(UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(UpperCAmelCase ) , tokenizer.decode(UpperCAmelCase ) )
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase_ = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
lowerCAmelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase )
lowerCAmelCase_ = tokenizer.batch_encode_plus(UpperCAmelCase , padding=UpperCAmelCase )
# fmt: off
lowerCAmelCase_ = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , UpperCAmelCase )
def lowerCamelCase_ ( self ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self ):
'''simple docstring'''
        pass
| 552 |
"""simple docstring"""
def solution(n: int = 1_0_0_0 ) -> int:
    """Counts, among the first n continued-fraction expansions of sqrt(2), those
    whose numerator has more digits than its denominator (Project Euler 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
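# Example: the eighth expansion is 1393/985, the first whose numerator has more
# digits than its denominator, so solution(8) == 1.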
if __name__ == "__main__":
print(f"""{solution() = }""") | 552 | 1 |
"""simple docstring"""
def selection_sort(collection ) -> list:
    """Sorts the collection in place by repeatedly selecting the minimum element."""
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
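# Example: selection_sort([3, 1, 2]) swaps 1 into position 0, then 2 into position 1,
# returning [1, 2, 3].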
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
| 386 |
"""simple docstring"""
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    """Counts n-digit integers that are also an n-th power (Project Euler 63)."""
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
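# Example: 7**5 = 16807 is a 5-digit fifth power and 8**9 = 134217728 a 9-digit
# ninth power, so both are counted.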
if __name__ == "__main__":
    print(f'''{solution(10, 22) = }''')
| 386 | 1 |
import csv
import tweepy
# Twitter API credentials
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = """"""
def lowercase( UpperCamelCase_ ) -> None:
'''simple docstring'''
# authorize twitter, initialize tweepy
UpperCamelCase = tweepy.OAuthHandler(UpperCamelCase_ , UpperCamelCase_ )
auth.set_access_token(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = tweepy.API(UpperCamelCase_ )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase = api.user_timeline(screen_name=UpperCamelCase_ , count=200 )
# save most recent tweets
alltweets.extend(UpperCamelCase_ )
# save the id of the oldest tweet less one
UpperCamelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(UpperCamelCase_ ) > 0:
print(f"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase = api.user_timeline(
screen_name=UpperCamelCase_ , count=200 , max_id=UpperCamelCase_ )
# save most recent tweets
alltweets.extend(UpperCamelCase_ )
# update the id of the oldest tweet less one
UpperCamelCase = alltweets[-1].id - 1
print(f"""...{len(UpperCamelCase_ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"""new_{screen_name}_tweets.csv""" , """w""" ) as f:
UpperCamelCase = csv.writer(UpperCamelCase_ )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(UpperCamelCase_ )
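# Note: max_id pagination asks only for tweets strictly older than the last one seen,
# so consecutive batches never overlap.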
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 537 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def _get_tensors( self , length ):
        """Builds dummy (input_ids, scores) batches of the given sequence length."""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )

        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
        criteria = MaxLengthCriteria(max_length=10 )

        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )

        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
        input_ids, scores = self._get_tensors(5 )

        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )

        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
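        # With initial_timestamp 0.2s in the past, the 0.1s budget is already spent,
        # so the second criterion fires immediately.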
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
| 537 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
UpperCamelCase = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
UpperCamelCase = {
"""jukebox""": 512,
}
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
snake_case = ["input_ids", "attention_mask"]
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=512 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ):
        """Positional file-argument names are assumed from VOCAB_FILES_NAMES above;
        the keyword names are recovered from their uses below."""
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file , encoding='''utf-8''' ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )

        with open(genres_file , encoding='''utf-8''' ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )

        with open(lyrics_file , encoding='''utf-8''' ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(R'''\-\'''' , R'''\-+\'''' )

        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def _snake_case ( self )->str:
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def _snake_case ( self )->str:
'''simple docstring'''
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ):  # method name assumed from the tokenizer API
        """Maps artist, genre and lyric tokens to their vocabulary ids."""
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))

        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
    def _tokenize( self , lyrics ):
        """Characters are the basic tokens of the lyrics vocabulary."""
        return list(lyrics )
    def tokenize( self , artist , genre , lyrics , **kwargs ):  # method name assumed from the tokenizer API
        """Converts artist, genre and lyrics into their normalized token forms."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists , genres , lyrics , is_split_into_words = False ):
        """Lowercases/normalizes artists and genres per model version and cleans the lyrics."""
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + '''.v2'''
                genres[idx] = [
                    self._normalize(genre ) + '''.v2''' for genre in genres[idx].split('''_''' )
                ]  # split is for the full dictionary with combined genres
if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
            vocab = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab['''<unk>'''] = 0  # id 0 reserved for unknown characters (assumed from the v2 vocab layout)
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''''''  # decode unknown ids to the empty string
else:
A_ : Optional[Any] = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
A_ : List[Any] = self._run_strip_accents(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = lyrics.replace('''\\''' , '''\n''' )
A_ : Dict = self.out_of_vocab.sub('''''' , _SCREAMING_SNAKE_CASE ), [], []
return artists, genres, lyrics
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : Tuple = unicodedata.normalize('''NFD''' , _SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = []
for char in text:
A_ : int = unicodedata.category(_SCREAMING_SNAKE_CASE )
if cat == "Mn":
continue
output.append(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
A_ : int = (
[chr(_SCREAMING_SNAKE_CASE ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(_SCREAMING_SNAKE_CASE ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(_SCREAMING_SNAKE_CASE ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
A_ : Optional[int] = frozenset(_SCREAMING_SNAKE_CASE )
A_ : int = re.compile(R'''_+''' )
A_ : str = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
A_ : str = pattern.sub('''_''' , _SCREAMING_SNAKE_CASE ).strip('''_''' )
return text
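    # Illustrative check (added): the normalization above lowercases, maps any
    # character outside [a-z0-9.] to "_", then collapses and strips underscores,
    # so _normalize("The Beatles!") -> "the_beatles".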
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
return " ".join(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False )->int:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = TensorType(_SCREAMING_SNAKE_CASE )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
A_ : List[Any] = tf.constant
A_ : Union[str, Any] = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
A_ : Any = torch.tensor
A_ : List[str] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
A_ : List[str] = jnp.array
A_ : Optional[int] = _is_jax
else:
A_ : Optional[Any] = np.asarray
A_ : Union[str, Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
A_ : Tuple = [inputs]
if not is_tensor(_SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = as_tensor(_SCREAMING_SNAKE_CASE )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE="pt" )->BatchEncoding:
'''simple docstring'''
A_ : Dict = [0, 0, 0]
A_ : Dict = [artist] * len(self.version )
A_ : str = [genres] * len(self.version )
A_ : Union[str, Any] = self.tokenize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : str = self._convert_token_to_id(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Optional[int] = [-INFINITY] * len(full_tokens[-1] )
A_ : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_SCREAMING_SNAKE_CASE )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A_ : Dict = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) )
A_ : int = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) )
A_ : int = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) )
return (artists_file, genres_file, lyrics_file)
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : Union[str, Any] = self.artists_decoder.get(_SCREAMING_SNAKE_CASE )
A_ : Tuple = [self.genres_decoder.get(_SCREAMING_SNAKE_CASE ) for genre in genres_index]
A_ : List[str] = [self.lyrics_decoder.get(_SCREAMING_SNAKE_CASE ) for character in lyric_index]
return artist, genres, lyrics
| 707 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. The alternative is to take a full-size model, reduce its layers and
# emb dimensions to the minimum, but keep the full vocab + merges files, which leads to ~3MB
# in total for all files. The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCamelCase = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
UpperCamelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCamelCase = dict(zip(vocab, range(len(vocab))))
UpperCamelCase = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = Path(tmpdirname)
UpperCamelCase = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
UpperCamelCase = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
UpperCamelCase = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
UpperCamelCase = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
UpperCamelCase = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
UpperCamelCase = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
UpperCamelCase = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
UpperCamelCase = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 152 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> tuple[int, int]:
'''simple docstring'''
if b == 0:
return (1, 0)
((a__) , (a__)) : List[Any] = extended_euclid(lowerCAmelCase__ , a % b )
a__ : str = a // b
return (y, x - k * y)
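# Illustrative check (added): this function returns Bezout coefficients (x, y)
# with a*x + b*y == gcd(a, b); e.g. extended_euclid(10, 6) == (-1, 2) because
# 10 * -1 + 6 * 2 == 2 == gcd(10, 6).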
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
((a__) , (a__)) : Tuple = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] = na * na
a__ : Union[str, Any] = ra * x * na + ra * y * na
return (n % m + m) % m
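# Illustrative check (added): the four arguments are (n1, r1, n2, r2) for the
# system x = r1 (mod n1), x = r2 (mod n2); e.g. chinese_remainder_theorem(5, 1, 7, 3)
# returns 31, and indeed 31 % 5 == 1 and 31 % 7 == 3.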
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
((a__) , (a__)) : Optional[Any] = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
if b < 0:
a__ : Optional[int] = (b % n + n) % n
return b
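# Illustrative check (added): this computes the modular inverse of a modulo n;
# e.g. invert_modulo(2, 5) == 3 since 2 * 3 % 5 == 1.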
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
a__ , a__ : List[Any] = invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ ), invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Dict = na * na
a__ : Any = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True) | 642 |
"""simple docstring"""
import functools
def lowercase__ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> int:
'''simple docstring'''
a__ : Any = len(lowerCAmelCase__ )
a__ : Optional[int] = len(lowerCAmelCase__ )
@functools.cache
def min_distance(lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> int:
            # if the first word's index overflows, the rest of the second word must be deleted
if indexa >= len_worda:
return len_worda - indexa
            # if the second word's index overflows, the rest of the first word must be deleted
if indexa >= len_worda:
return len_worda - indexa
a__ : List[Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , lowerCAmelCase__ ) , 1 + min_distance(lowerCAmelCase__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
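# Illustrative check (added): the cached recursion above computes the classic
# Levenshtein edit distance, e.g. 3 edits for "kitten" -> "sitting"
# (two substitutions and one insertion).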
if __name__ == "__main__":
import doctest
doctest.testmod() | 642 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCamelCase__ = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE( snake_case_ : Any , snake_case_ : int , snake_case_ : str , snake_case_ : int=None , snake_case_ : Union[str, Any]=None ) ->str:
'''simple docstring'''
# Recurse if needed
if "." in tensor_name:
_lowercase : List[Any] = tensor_name.split('''.''' )
for split in splits[:-1]:
_lowercase : Any = getattr(snake_case_ , snake_case_ )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
_lowercase : int = new_module
_lowercase : Tuple = splits[-1]
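        # Illustrative note (added): a dotted name such as "transformer.h.0.attn.weight"
        # is resolved one attribute at a time, leaving `module` pointing at the parent
        # submodule and the final component ("weight") as the tensor name.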
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." )
_lowercase : Any = tensor_name in module._buffers
_lowercase : Dict = getattr(snake_case_ , snake_case_ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
_lowercase : List[Any] = False
_lowercase : Any = False
if is_buffer or not is_bitsandbytes_available():
_lowercase : Optional[Any] = False
_lowercase : int = False
else:
_lowercase : Tuple = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
_lowercase : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
_lowercase : int = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_lowercase : Any = old_value.to(snake_case_ )
elif isinstance(snake_case_ , torch.Tensor ):
_lowercase : Dict = value.to('''cpu''' )
if value.dtype == torch.inta:
_lowercase : Dict = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
_lowercase : Union[str, Any] = torch.tensor(snake_case_ , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , snake_case_ ) and fpaa_statistics is None:
_lowercase : List[str] = new_value.T
_lowercase : Union[str, Any] = old_value.__dict__
if is_abit:
_lowercase : str = bnb.nn.IntaParams(snake_case_ , requires_grad=snake_case_ , **snake_case_ ).to(snake_case_ )
elif is_abit:
_lowercase : Tuple = bnb.nn.Paramsabit(snake_case_ , requires_grad=snake_case_ , **snake_case_ ).to(snake_case_ )
_lowercase : Any = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case_ ) )
else:
if value is None:
_lowercase : str = old_value.to(snake_case_ )
elif isinstance(snake_case_ , torch.Tensor ):
_lowercase : Any = value.to(snake_case_ )
else:
_lowercase : Optional[int] = torch.tensor(snake_case_ , device=snake_case_ )
if is_buffer:
_lowercase : Dict = new_value
else:
_lowercase : List[str] = nn.Parameter(snake_case_ , requires_grad=old_value.requires_grad )
_lowercase : Any = new_value
def _SCREAMING_SNAKE_CASE( snake_case_ : List[Any] , snake_case_ : Tuple=None , snake_case_ : Dict=None , snake_case_ : str=None , snake_case_ : List[Any]=False ) ->int:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
_lowercase : Tuple = []
current_key_name.append(snake_case_ )
if (isinstance(snake_case_ , nn.Linear ) or isinstance(snake_case_ , snake_case_ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(snake_case_ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case_ , snake_case_ ):
_lowercase : Tuple = module.weight.shape
else:
_lowercase : Any = module.in_features
_lowercase : Optional[int] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
_lowercase : Union[str, Any] = bnb.nn.LinearabitLt(
snake_case_ , snake_case_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
_lowercase : Any = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_lowercase : str = bnb.nn.Linearabit(
snake_case_ , snake_case_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
_lowercase : Optional[Any] = True
# Store the module class in case we need to transpose the weight later
_lowercase : Optional[int] = type(snake_case_ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case_ )
if len(list(module.children() ) ) > 0:
_lowercase : Dict = _replace_with_bnb_linear(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , has_been_replaced=snake_case_ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _SCREAMING_SNAKE_CASE( snake_case_ : Dict , snake_case_ : List[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Any=None ) ->int:
'''simple docstring'''
_lowercase : Tuple = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
_lowercase : Tuple = _replace_with_bnb_linear(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
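# Illustrative usage sketch (added; assumes a standard transformers setup): given a
# model skeleton and a quantization config, the wrapper above swaps every eligible
# nn.Linear (or transposed Conv1D) for a bitsandbytes 8-bit/4-bit layer, skipping
# anything in modules_to_not_convert (default: "lm_head").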
def _SCREAMING_SNAKE_CASE( *snake_case_ : Union[str, Any] , **snake_case_ : List[str] ) ->Tuple:
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case_ , )
return replace_with_bnb_linear(*snake_case_ , **snake_case_ )
def _SCREAMING_SNAKE_CASE( *snake_case_ : Optional[Any] , **snake_case_ : Tuple ) ->str:
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case_ , )
return set_module_quantized_tensor_to_device(*snake_case_ , **snake_case_ )
def _SCREAMING_SNAKE_CASE( snake_case_ : Optional[int] ) ->str:
'''simple docstring'''
_lowercase : List[Any] = deepcopy(snake_case_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
_lowercase : str = find_tied_parameters(snake_case_ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case_ , snake_case_ ):
_lowercase : Any = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowercase : Union[str, Any] = sum(snake_case_ , [] )
_lowercase : Tuple = len(snake_case_ ) > 0
# Check if it is a base model
_lowercase : List[str] = not hasattr(snake_case_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowercase : Dict = list(model.named_children() )
_lowercase : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
_lowercase : str = set(snake_case_ ) - set(snake_case_ )
_lowercase : Any = list(set(snake_case_ ) ) + list(snake_case_ )
# remove ".weight" from the keys
_lowercase : Dict = ['''.weight''', '''.bias''']
_lowercase : str = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowercase : int = name.replace(snake_case_ , '''''' )
filtered_module_names.append(snake_case_ )
return filtered_module_names
| 718 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
snake_case_ = StableDiffusionSAGPipeline
snake_case_ = TEXT_TO_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ = False
def __lowercase ( self : Dict ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_lowercase : int = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
_lowercase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_lowercase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
_lowercase : Dict = CLIPTextModel(UpperCamelCase_ )
_lowercase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowercase : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowercase ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any]=0 ) -> Any:
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('''mps''' ):
_lowercase : Any = torch.manual_seed(UpperCamelCase_ )
else:
_lowercase : Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_lowercase : Union[str, Any] = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self : List[Any] ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowercase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : str ) -> List[Any]:
'''simple docstring'''
_lowercase : Tuple = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_lowercase : int = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Union[str, Any] = '''.'''
_lowercase : Union[str, Any] = torch.manual_seed(0 )
_lowercase : Tuple = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
_lowercase : int = output.images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowercase : List[Any] = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowercase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : str = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_lowercase : str = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : int = '''.'''
_lowercase : Tuple = torch.manual_seed(0 )
_lowercase : int = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
_lowercase : Union[str, Any] = output.images
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowercase : List[str] = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __lowercase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_lowercase : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_lowercase : List[Any] = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Optional[int] = '''.'''
_lowercase : Any = torch.manual_seed(0 )
_lowercase : int = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
_lowercase : Dict = output.images
assert image.shape == (1, 512, 768, 3)
| 411 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
lowercase : List[str] = '''bart'''
lowercase : str = True
@st.cache(allow_output_mutation=_a )
def lowerCAmelCase__ ( ):
if LOAD_DENSE_INDEX:
snake_case_ : int = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
snake_case_ : int = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
snake_case_ : List[str] = qar_model.eval()
else:
snake_case_ , snake_case_ : Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
snake_case_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
snake_case_ : int = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
snake_case_ : Any = sas_model.eval()
else:
snake_case_ , snake_case_ : int = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_a )
def lowerCAmelCase__ ( ):
if LOAD_DENSE_INDEX:
snake_case_ : List[str] = faiss.StandardGpuResources()
snake_case_ : str = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
snake_case_ : List[Any] = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 1_28) , )
snake_case_ : int = faiss.IndexFlatIP(1_28 )
snake_case_ : List[Any] = faiss.index_cpu_to_gpu(_a , 1 , _a )
wikiaab_gpu_index_flat.add(_a ) # TODO fix for larger GPU
else:
snake_case_ , snake_case_ : Optional[int] = (None, None)
snake_case_ : str = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_a )
def lowerCAmelCase__ ( ):
snake_case_ : List[str] = datasets.load_dataset("eli5" , name="LFQA_reddit" )
snake_case_ : List[str] = elia["train_eli5"]
snake_case_ : Dict = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 1_28) )
snake_case_ : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(_a )
return (elia_train, eli5_train_q_index)
lowercase ,lowercase ,lowercase : Optional[Any] = load_indexes()
lowercase ,lowercase ,lowercase ,lowercase : Union[str, Any] = load_models()
lowercase ,lowercase : Optional[Any] = load_train_data()
def lowerCAmelCase__ ( _a : int , _a : Dict=10 ):
snake_case_ : Any = embed_questions_for_retrieval([question] , _a , _a )
snake_case_ , snake_case_ : Union[str, Any] = eli5_train_q_index.search(_a , _a )
snake_case_ : List[Any] = [elia_train[int(_a )] for i in I[0]]
return nn_examples
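# Illustrative note (added): this returns the n (default 10) ELI5 training examples
# whose question embeddings score highest by inner product against the query
# embedding (the index above is a faiss.IndexFlatIP).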
def lowerCAmelCase__ ( _a : int , _a : Optional[Any]="wiki40b" , _a : Any="dense" , _a : List[str]=10 ):
if source == "none":
snake_case_ , snake_case_ : List[Any] = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case_ , snake_case_ : Optional[int] = query_qa_dense_index(
_a , _a , _a , _a , _a , _a )
else:
snake_case_ , snake_case_ : Optional[Any] = query_es_index(
_a , _a , index_name="english_wiki40b_snippets_100w" , n_results=_a , )
snake_case_ : int = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
snake_case_ : List[str] = "question: {} context: {}".format(_a , _a )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _a : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _a : None),
} )
def lowerCAmelCase__ ( _a : Optional[int] , _a : str , _a : Union[str, Any] , _a : List[str]=64 , _a : str=2_56 , _a : Union[str, Any]=False , _a : Optional[Any]=2 , _a : Tuple=0.95 , _a : str=0.8 ):
with torch.no_grad():
snake_case_ : Tuple = qa_sas_generate(
_a , _a , _a , num_answers=1 , num_beams=_a , min_len=_a , max_len=_a , do_sample=_a , temp=_a , top_p=_a , top_k=_a , max_input_length=10_24 , device="cuda:0" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
lowercase : Tuple = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
lowercase : Union[str, Any] = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowercase : Tuple = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages for the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
lowercase : List[Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
lowercase : List[Any] = st.sidebar.checkbox('''Demo options''')
if demo_options:
lowercase : Any = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
lowercase : Any = action_list.index(action_st)
lowercase : Optional[Any] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
lowercase : str = show_type == '''Show full text of passages'''
else:
lowercase : Any = 3
lowercase : Any = True
lowercase : Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
lowercase : Tuple = '''
### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
    trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
'''
st.sidebar.markdown(retriever_info)
lowercase : Optional[int] = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
lowercase : int = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
lowercase : List[Any] = '''wiki40b'''
lowercase : Optional[int] = '''dense'''
lowercase : Optional[int] = '''beam'''
lowercase : Dict = 2
lowercase : Optional[int] = 64
lowercase : str = 2_56
lowercase : Optional[Any] = None
lowercase : Union[str, Any] = None
lowercase : str = st.sidebar.checkbox('''Generation options''')
if generate_options:
lowercase : Tuple = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
    **beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
lowercase : Union[str, Any] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
lowercase : str = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
lowercase : Optional[Any] = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
lowercase : Dict = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowercase : Optional[int] = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
lowercase : str = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
lowercase : Optional[Any] = None
# start main text
lowercase : Any = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
lowercase : Dict = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
lowercase : Tuple = st.text_input('''Enter your question here:''', '''''')
else:
lowercase : List[str] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
lowercase ,lowercase : int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
lowercase ,lowercase : List[Any] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
lowercase : Any = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowercase : int = support_list[:10]
lowercase : str = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
lowercase ,lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
lowercase ,lowercase : List[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
lowercase : Optional[int] = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
lowercase : Tuple = res[1].strip()
if sec_titles == "":
lowercase : str = '''[{}]({})'''.format(res[0], wiki_url)
else:
lowercase : int = sec_titles.split(''' & ''')
lowercase : Dict = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
lowercase : str = find_nearest_training(question)
lowercase : List[str] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
lowercase : Dict = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
lowercase : Tuple = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 568 |
from itertools import permutations
def lowerCAmelCase__ ( _a : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
snake_case_ : List[str] = [7, 11, 13, 17]
for i, test in enumerate(_a ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
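# Illustrative check (added): this is the substring-divisibility property from
# Project Euler problem 43; the known pandigital 1406357289 passes, i.e.
# is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) is True.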
def lowerCAmelCase__ ( _a : int = 10 ):
return sum(
int("".join(map(_a , _a ) ) )
for num in permutations(range(_a ) )
if is_substring_divisible(_a ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 568 | 1 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCamelCase__ : int = logging.get_logger(__name__)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase__ : str = None , lowerCAmelCase__ : uuid.UUID = None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : int=None ):
"""simple docstring"""
if not conversation_id:
__SCREAMING_SNAKE_CASE : int = uuid.uuida()
if past_user_inputs is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = []
if generated_responses is None:
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : uuid.UUID = conversation_id
__SCREAMING_SNAKE_CASE : List[str] = past_user_inputs
__SCREAMING_SNAKE_CASE : List[str] = generated_responses
__SCREAMING_SNAKE_CASE : Optional[str] = text
def __eq__( self : List[str] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__SCREAMING_SNAKE_CASE : Optional[Any] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__SCREAMING_SNAKE_CASE : str = text
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__SCREAMING_SNAKE_CASE : str = None
def UpperCamelCase__ ( self : List[str] , lowerCAmelCase__ : str ):
"""simple docstring"""
self.generated_responses.append(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__SCREAMING_SNAKE_CASE : Union[str, Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
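# Illustrative example (added): repr(Conversation("Hi there!")) renders roughly as
#   Conversation id: <uuid>
#   user >> Hi there!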
@add_end_docstrings(
lowerCamelCase__ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self : Dict , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : str ):
"""simple docstring"""
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
if self.tokenizer.pad_token_id is None:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.eos_token
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = {}
__SCREAMING_SNAKE_CASE : Tuple = {}
__SCREAMING_SNAKE_CASE : List[str] = {}
if min_length_for_response is not None:
__SCREAMING_SNAKE_CASE : int = min_length_for_response
if minimum_tokens is not None:
__SCREAMING_SNAKE_CASE : List[Any] = minimum_tokens
if "max_length" in generate_kwargs:
__SCREAMING_SNAKE_CASE : Tuple = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__SCREAMING_SNAKE_CASE : str = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCAmelCase__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Union[str, Any] , lowerCAmelCase__ : Union[Conversation, List[Conversation]] , lowerCAmelCase__ : Optional[Any]=0 , **lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = super().__call__(lowerCAmelCase__ , num_workers=lowerCAmelCase__ , **lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) == 1:
return outputs[0]
return outputs
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : Conversation , lowerCAmelCase__ : str=3_2 ):
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer._build_conversation_input_ids(lowerCAmelCase__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__SCREAMING_SNAKE_CASE : Any = self._legacy_parse_and_tokenize(lowerCAmelCase__ )
if self.framework == "pt":
__SCREAMING_SNAKE_CASE : Any = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple=1_0 , **lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__SCREAMING_SNAKE_CASE : Any = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__SCREAMING_SNAKE_CASE : Tuple = max_length - minimum_tokens
__SCREAMING_SNAKE_CASE : Any = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__SCREAMING_SNAKE_CASE : List[str] = model_inputs["""attention_mask"""][:, -trim:]
__SCREAMING_SNAKE_CASE : Optional[int] = model_inputs.pop("""conversation""" )
__SCREAMING_SNAKE_CASE : Optional[int] = max_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.generate(**lowerCAmelCase__ , **lowerCAmelCase__ )
if self.model.config.is_encoder_decoder:
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
else:
__SCREAMING_SNAKE_CASE : Tuple = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int=True ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = model_outputs["""output_ids"""]
__SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(lowerCAmelCase__ )
return conversation
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : Conversation ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.tokenizer.eos_token_id
__SCREAMING_SNAKE_CASE : Any = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE : Any = input_ids[-self.tokenizer.model_max_length :]
return input_ids | 178 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """embed_dim""" ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """num_heads""" ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : int=1_3 , lowerCAmelCase__ : Union[str, Any]=6_4 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : str=[1_6, 4_8, 9_6] , lowerCAmelCase__ : Any=[1, 3, 6] , lowerCAmelCase__ : Any=[1, 2, 1_0] , lowerCAmelCase__ : Union[str, Any]=[7, 3, 3] , lowerCAmelCase__ : Tuple=[4, 2, 2] , lowerCAmelCase__ : Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ : Dict=[2, 2, 2] , lowerCAmelCase__ : Optional[int]=[False, False, True] , lowerCAmelCase__ : List[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ : List[Any]=0.02 , lowerCAmelCase__ : List[str]=1E-12 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Optional[Any]=2 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = parent
__SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = image_size
__SCREAMING_SNAKE_CASE : Any = patch_sizes
__SCREAMING_SNAKE_CASE : List[Any] = patch_stride
__SCREAMING_SNAKE_CASE : List[Any] = patch_padding
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Dict = use_labels
__SCREAMING_SNAKE_CASE : int = num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = num_channels
__SCREAMING_SNAKE_CASE : List[str] = embed_dim
__SCREAMING_SNAKE_CASE : Optional[Any] = num_heads
__SCREAMING_SNAKE_CASE : Optional[int] = stride_kv
__SCREAMING_SNAKE_CASE : Union[str, Any] = depth
__SCREAMING_SNAKE_CASE : Optional[Any] = cls_token
__SCREAMING_SNAKE_CASE : List[Any] = attention_drop_rate
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = CvtModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__SCREAMING_SNAKE_CASE : Any = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
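        # Illustrative trace (added): with the tester defaults above (image_size=64,
        # patch sizes [7, 3, 3], strides [4, 2, 2], paddings [2, 1, 1]) the three
        # stages produce 16x16 -> 8x8 -> 4x4 feature maps, so the expected shape
        # is (batch_size, 96, 4, 4).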
def UpperCamelCase__ ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = CvtForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = config_and_inputs
__SCREAMING_SNAKE_CASE : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_A : Optional[Any] = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
_A : Dict = False
_A : Union[str, Any] = False
_A : str = False
_A : List[str] = False
_A : Optional[int] = False
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = CvtModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
return
@unittest.skip(reason="""Cvt does not output attentions""" )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ):
__SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[int] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
pass
@slow
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = CvtModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def lowerCAmelCase_ ( ):
__SCREAMING_SNAKE_CASE : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__magic_name__ : Tuple = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
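# A minimal standalone usage sketch of the same tool outside of unittest
# (illustrative; assumes the transformers agents extras are installed and the
# default checkpoint for this tool can be downloaded):
#
#   from transformers import load_tool
#
#   qa_tool = load_tool('text-question-answering')
#   qa_tool.setup()
#   print(qa_tool('Hugging Face was founded in 2016.', 'When was Hugging Face founded?'))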
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
a_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
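# Quick illustrative check of `entropy` (not part of the original script): a
# uniform attention distribution over 4 tokens has entropy log(4) ~= 1.3863.
#
#   >>> entropy(torch.full((1, 4), 0.25))
#   tensor([1.3863])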
def print_ad_tensor(tensor):
    """Print a 2D tensor (layers x heads)."""
    logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =model.config.num_hidden_layers, model.config.num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.zeros(UpperCamelCase__, UpperCamelCase__ ).to(args.device )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.zeros(UpperCamelCase__, UpperCamelCase__ ).to(args.device )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : str =torch.ones(UpperCamelCase__, UpperCamelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCamelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
SCREAMING_SNAKE_CASE__ : Dict =0.0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0.0
for step, inputs in enumerate(tqdm(UpperCamelCase__, desc='''Iteration''', disable=args.local_rank not in [-1, 0] ) ):
SCREAMING_SNAKE_CASE__ : List[str] =tuple(t.to(args.device ) for t in inputs )
((SCREAMING_SNAKE_CASE__) , ) : List[Any] =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
SCREAMING_SNAKE_CASE__ : List[Any] =model(UpperCamelCase__, labels=UpperCamelCase__, head_mask=UpperCamelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] =entropy(attn.detach(), UpperCamelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
SCREAMING_SNAKE_CASE__ : str =2
SCREAMING_SNAKE_CASE__ : str =torch.pow(torch.pow(UpperCamelCase__, UpperCamelCase__ ).sum(-1 ), 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
SCREAMING_SNAKE_CASE__ : Tuple =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(UpperCamelCase__ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(UpperCamelCase__ )
logger.info('''Head ranked by importance scores''' )
SCREAMING_SNAKE_CASE__ : Tuple =torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device )
SCREAMING_SNAKE_CASE__ : str =torch.arange(
head_importance.numel(), device=args.device )
SCREAMING_SNAKE_CASE__ : str =head_ranks.view_as(UpperCamelCase__ )
print_ad_tensor(UpperCamelCase__ )
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =compute_heads_importance(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''', UpperCamelCase__, original_score * args.masking_threshold )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.ones_like(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max(1, int(new_head_mask.numel() * args.masking_amount ) )
SCREAMING_SNAKE_CASE__ : str =original_score
while current_score >= original_score * args.masking_threshold:
SCREAMING_SNAKE_CASE__ : Optional[Any] =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
SCREAMING_SNAKE_CASE__ : Optional[int] =float('''Inf''' )
SCREAMING_SNAKE_CASE__ : List[str] =head_importance.view(-1 ).sort()[1]
if len(UpperCamelCase__ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
SCREAMING_SNAKE_CASE__ : Optional[Any] =current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist() ) )
SCREAMING_SNAKE_CASE__ : List[str] =new_head_mask.view(-1 )
SCREAMING_SNAKE_CASE__ : Any =0.0
SCREAMING_SNAKE_CASE__ : str =new_head_mask.view_as(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =new_head_mask.clone().detach()
print_ad_tensor(UpperCamelCase__ )
# Compute metric and head importance again
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =compute_heads_importance(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__, head_mask=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''', UpperCamelCase__, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 1_0_0, )
logger.info('''Final head mask''' )
print_ad_tensor(UpperCamelCase__ )
np.save(os.path.join(args.output_dir, '''head_mask.npy''' ), head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =compute_heads_importance(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__, compute_importance=UpperCamelCase__, head_mask=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =1 / loss
SCREAMING_SNAKE_CASE__ : Tuple =datetime.now() - before_time
SCREAMING_SNAKE_CASE__ : Optional[Any] =sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ : Optional[int] ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =[
v,
]
assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ : Optional[int] =datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =compute_heads_importance(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, compute_entropy=UpperCamelCase__, compute_importance=UpperCamelCase__, head_mask=UpperCamelCase__, actually_pruned=UpperCamelCase__, )
SCREAMING_SNAKE_CASE__ : Dict =1 / loss
SCREAMING_SNAKE_CASE__ : Union[str, Any] =datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''', UpperCamelCase__, UpperCamelCase__, pruned_num_params / original_num_params * 1_0_0, )
logger.info('''Pruning: score with masking: %f score with pruning: %f''', UpperCamelCase__, UpperCamelCase__ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''', original_time / new_time * 1_0_0 )
save_model(UpperCamelCase__, args.output_dir )
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''', default=UpperCamelCase__, type=UpperCamelCase__, required=UpperCamelCase__, help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''', )
parser.add_argument(
'''--model_name_or_path''', default=UpperCamelCase__, type=UpperCamelCase__, required=UpperCamelCase__, help='''Path to pretrained model or model identifier from huggingface.co/models''', )
parser.add_argument(
'''--output_dir''', default=UpperCamelCase__, type=UpperCamelCase__, required=UpperCamelCase__, help='''The output directory where the model predictions and checkpoints will be written.''', )
# Other parameters
parser.add_argument(
'''--config_name''', default='''''', type=UpperCamelCase__, help='''Pretrained config name or path if not the same as model_name_or_path''', )
parser.add_argument(
'''--tokenizer_name''', default='''''', type=UpperCamelCase__, help='''Pretrained tokenizer name or path if not the same as model_name_or_path''', )
parser.add_argument(
'''--cache_dir''', default=UpperCamelCase__, type=UpperCamelCase__, help='''Where do you want to store the pre-trained models downloaded from s3''', )
parser.add_argument(
'''--data_subset''', type=UpperCamelCase__, default=-1, help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''', action='''store_true''', help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''', action='''store_true''', help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''', action='''store_true''', help='''Don\'t normalize all importance scores between 0 and 1''', )
parser.add_argument(
'''--try_masking''', action='''store_true''', help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''', default=0.9, type=UpperCamelCase__, help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''', )
parser.add_argument(
'''--masking_amount''', default=0.1, type=UpperCamelCase__, help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''', default='''acc''', type=UpperCamelCase__, help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''', default=1_2_8, type=UpperCamelCase__, help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
), )
parser.add_argument('''--batch_size''', default=1, type=UpperCamelCase__, help='''Batch size.''' )
parser.add_argument('''--seed''', type=UpperCamelCase__, default=4_2 )
parser.add_argument('''--local_rank''', type=UpperCamelCase__, default=-1, help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''', action='''store_true''', help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''', type=UpperCamelCase__, default='''''', help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''', type=UpperCamelCase__, default='''''', help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device, args.n_gpu, bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)
# Prepare dataset
    data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    main()
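# Example invocation of the pruning script above (illustrative; the script and
# data file names are assumptions -- the data file must be loadable by
# np.loadtxt as integer token ids, see main()):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir data/token_ids.txt \
#       --output_dir output/ \
#       --try_masking \
#       --masking_threshold 0.9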
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
'''simple docstring'''
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
__UpperCamelCase :str = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__UpperCamelCase :Dict = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase :Union[str, Any] = '''TransientGlobalSelfAttention'''
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index)}'
# Self-Attention
__UpperCamelCase :List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
__UpperCamelCase :Any = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
__UpperCamelCase :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
__UpperCamelCase :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__UpperCamelCase :Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__UpperCamelCase :Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__UpperCamelCase :str = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
__UpperCamelCase :Tuple = tax_attention_key
__UpperCamelCase :Union[str, Any] = tax_attention_out
__UpperCamelCase :Tuple = tax_attention_query
__UpperCamelCase :str = tax_attention_value
__UpperCamelCase :Tuple = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase :Optional[int] = tax_global_layer_norm
if split_mlp_wi:
__UpperCamelCase :List[str] = tax_mlp_wi_a
__UpperCamelCase :str = tax_mlp_wi_a
else:
__UpperCamelCase :Optional[int] = tax_mlp_wi
__UpperCamelCase :Optional[Any] = tax_mlp_wo
__UpperCamelCase :Union[str, Any] = tax_mlp_layer_norm
__UpperCamelCase :Dict = flax_model_encoder_layer_block
# Only for layer 0:
__UpperCamelCase :Dict = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
__UpperCamelCase :Any = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__UpperCamelCase :Dict = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
__UpperCamelCase :Tuple = tax_encoder_global_rel_embedding
# Assigning
__UpperCamelCase :List[Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
__UpperCamelCase :List[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index)}'
# Self-Attention
__UpperCamelCase :List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
__UpperCamelCase :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
__UpperCamelCase :str = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
__UpperCamelCase :Dict = tax_enc_dec_attention_module['''key''']['''kernel''']
__UpperCamelCase :List[str] = tax_enc_dec_attention_module['''out''']['''kernel''']
__UpperCamelCase :Optional[Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
__UpperCamelCase :Optional[Any] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
__UpperCamelCase :List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
__UpperCamelCase :Tuple = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
__UpperCamelCase :List[str] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
__UpperCamelCase :Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
__UpperCamelCase :str = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
__UpperCamelCase :Optional[int] = tax_attention_key
__UpperCamelCase :Dict = tax_attention_out
__UpperCamelCase :Dict = tax_attention_query
__UpperCamelCase :Tuple = tax_attention_value
__UpperCamelCase :List[str] = tax_pre_attention_layer_norm
__UpperCamelCase :str = tax_enc_dec_attention_key
__UpperCamelCase :int = tax_enc_dec_attention_out
__UpperCamelCase :Optional[int] = tax_enc_dec_attention_query
__UpperCamelCase :Optional[Any] = tax_enc_dec_attention_value
__UpperCamelCase :Dict = tax_cross_layer_norm
if split_mlp_wi:
__UpperCamelCase :str = tax_mlp_wi_a
__UpperCamelCase :Dict = tax_mlp_wi_a
else:
__UpperCamelCase :Dict = tax_mlp_wi
__UpperCamelCase :Tuple = tax_mlp_wo
__UpperCamelCase :str = txa_mlp_layer_norm
__UpperCamelCase :str = flax_model_decoder_layer_block
# Decoder Normalization
__UpperCamelCase :Optional[int] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
__UpperCamelCase :Optional[int] = txa_decoder_norm
# Only for layer 0:
__UpperCamelCase :str = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
__UpperCamelCase :int = tax_decoder_rel_embedding
# Token Embeddings
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
__UpperCamelCase :str = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__UpperCamelCase :Optional[Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
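# Example invocation of the conversion script above (paths and config name
# are illustrative; the flag names match the argparse definitions):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./long-t5-flax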
import qiskit
def half_adder(bit0: int, bit1: int):
    """Build and simulate a quantum half adder: classical bit 0 holds the XOR
    (sum) of the inputs, classical bit 1 holds the AND (carry)."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'Half Adder Output Qubit Counts: {counts}')
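# For inputs (1, 1) the circuit writes XOR = 0 to classical bit 0 and
# AND = 1 to classical bit 1; Qiskit prints bit strings with the highest
# classical bit first, so on an ideal simulator all 1_000 shots land in '10':
#
#   Half Adder Output Qubit Counts: {'10': 1000}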
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return ''.join(lines[start_index:end_index]), start_index, end_index, lines
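# Illustrative call (assuming a task guide containing the generated-tip
# markers used below): returns the text strictly between the two prompts plus
# the line indices needed to splice a replacement back into `lines`.
#
#   text, start, end, lines = _find_text_in_file(
#       filename='docs/source/en/tasks/summarization.md',
#       start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
#       end_prompt='<!--End of the generated tip-->',
#   )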
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
def __snake_case ( self : str ):
'''simple docstring'''
super().setup()
snake_case : List[Any] =nn.Dense(5, dtype=self.dtype )
def __call__( self : Optional[int], *_snake_case : str, **_snake_case : Optional[Any] ):
'''simple docstring'''
snake_case : Optional[int] =super().__call__(*_snake_case, **_snake_case )
snake_case : Optional[int] =self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def _a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
def cross_entropy(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None ):
snake_case : int =logits.shape[-1]
snake_case : Any =(labels[..., None] == jnp.arange(lowerCamelCase_ )[None]).astype('''f4''' )
snake_case : List[str] =jax.nn.log_softmax(lowerCamelCase_ , axis=-1 )
snake_case : List[Any] =-jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
snake_case : Optional[Any] =reduction(lowerCamelCase_ )
return loss
snake_case : Optional[int] =partial(lowerCamelCase_ , reduction=jnp.mean )
snake_case : Optional[Any] =cross_entropy(lowerCamelCase_ , lowerCamelCase_ )
snake_case : Tuple =cross_entropy(lowerCamelCase_ , lowerCamelCase_ )
snake_case : Any =cross_entropy(lowerCamelCase_ , lowerCamelCase_ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowerCAmelCase_ :
__UpperCAmelCase = "google/bigbird-roberta-base"
__UpperCAmelCase = 3000
__UpperCAmelCase = 1_0500
__UpperCAmelCase = 128
__UpperCAmelCase = 3
__UpperCAmelCase = 1
__UpperCAmelCase = 5
# tx_args
__UpperCAmelCase = 3e-5
__UpperCAmelCase = 0.0
__UpperCAmelCase = 2_0000
__UpperCAmelCase = 0.0_0_9_5
__UpperCAmelCase = "bigbird-roberta-natural-questions"
__UpperCAmelCase = "training-expt"
__UpperCAmelCase = "data/nq-training.jsonl"
__UpperCAmelCase = "data/nq-validation.jsonl"
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
os.makedirs(self.base_dir, exist_ok=_snake_case )
snake_case : Dict =os.path.join(self.base_dir, self.save_dir )
snake_case : Union[str, Any] =self.batch_size_per_device * jax.device_count()
@dataclass
class lowerCAmelCase_ :
__UpperCAmelCase = 42
__UpperCAmelCase = 4096 # no dynamic padding on TPUs
def __call__( self : List[Any], _snake_case : Union[str, Any] ):
'''simple docstring'''
snake_case : Tuple =self.collate_fn(_snake_case )
snake_case : Dict =jax.tree_util.tree_map(_snake_case, _snake_case )
return batch
def __snake_case ( self : Dict, _snake_case : str ):
'''simple docstring'''
snake_case , snake_case : Dict =self.fetch_inputs(features['''input_ids'''] )
snake_case : List[str] ={
'''input_ids''': jnp.array(_snake_case, dtype=jnp.intaa ),
'''attention_mask''': jnp.array(_snake_case, dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''], dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''], dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''], dtype=jnp.intaa ),
}
return batch
def __snake_case ( self : Optional[Any], _snake_case : list ):
'''simple docstring'''
snake_case : int =[self._fetch_inputs(_snake_case ) for ids in input_ids]
return zip(*_snake_case )
def __snake_case ( self : Optional[Any], _snake_case : list ):
'''simple docstring'''
snake_case : List[Any] =[1 for _ in range(len(_snake_case ) )]
while len(_snake_case ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
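# Standalone illustration of the batching rule above (not part of the
# original script; the real generator expects a Hugging Face dataset that
# supports shuffle(seed=...) and slice indexing): len(dataset) // batch_size
# full batches are yielded and the remainder is silently dropped.
def _batching_demo():
    data = list(range(5))
    batch_size = 2
    for i in range(len(data) // batch_size):
        print(data[i * batch_size : (i + 1) * batch_size])  # [0, 1] then [2, 3]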
@partial(jax.pmap , axis_name='''batch''' )
def _a ( lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
def loss_fn(lowerCamelCase_ ):
snake_case : Dict =model_inputs.pop('''start_labels''' )
snake_case : Optional[Any] =model_inputs.pop('''end_labels''' )
snake_case : Any =model_inputs.pop('''pooled_labels''' )
snake_case : Dict =state.apply_fn(**lowerCamelCase_ , params=lowerCamelCase_ , dropout_rng=lowerCamelCase_ , train=lowerCamelCase_ )
snake_case , snake_case , snake_case : List[Any] =outputs
return state.loss_fn(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
snake_case , snake_case : Any =jax.random.split(lowerCamelCase_ )
snake_case : List[str] =jax.value_and_grad(lowerCamelCase_ )
snake_case , snake_case : str =grad_fn(state.params )
snake_case : Optional[Any] =jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
snake_case : Any =jax.lax.pmean(lowerCamelCase_ , '''batch''' )
snake_case : Optional[int] =state.apply_gradients(grads=lowerCamelCase_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def _a ( lowerCamelCase_ , **lowerCamelCase_ ):
snake_case : List[Any] =model_inputs.pop('''start_labels''' )
snake_case : int =model_inputs.pop('''end_labels''' )
snake_case : List[str] =model_inputs.pop('''pooled_labels''' )
snake_case : Optional[Any] =state.apply_fn(**lowerCamelCase_ , params=state.params , train=lowerCamelCase_ )
snake_case , snake_case , snake_case : Dict =outputs
snake_case : List[Any] =state.loss_fn(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
snake_case : Optional[Any] =jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class lowerCAmelCase_ :
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = None
def __snake_case ( self : Tuple, _snake_case : int, _snake_case : Any, _snake_case : Tuple, _snake_case : Any=None ):
'''simple docstring'''
snake_case : int =model.params
snake_case : List[str] =TrainState.create(
apply_fn=model.__call__, params=_snake_case, tx=_snake_case, loss_fn=_snake_case, )
if ckpt_dir is not None:
snake_case , snake_case , snake_case , snake_case , snake_case : Optional[Any] =restore_checkpoint(_snake_case, _snake_case )
snake_case : Tuple ={
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
snake_case , snake_case : Tuple =build_tx(**_snake_case )
snake_case : Optional[int] =train_state.TrainState(
step=_snake_case, apply_fn=model.__call__, params=_snake_case, tx=_snake_case, opt_state=_snake_case, )
snake_case : int =args
snake_case : str =data_collator
snake_case : Tuple =lr
snake_case : Union[str, Any] =params
snake_case : Tuple =jax_utils.replicate(_snake_case )
return state
def __snake_case ( self : Union[str, Any], _snake_case : int, _snake_case : int, _snake_case : Optional[Any] ):
'''simple docstring'''
snake_case : Dict =self.args
snake_case : Optional[int] =len(_snake_case ) // args.batch_size
snake_case : str =jax.random.PRNGKey(0 )
snake_case : Union[str, Any] =jax.random.split(_snake_case, jax.device_count() )
for epoch in range(args.max_epochs ):
snake_case : Any =jnp.array(0, dtype=jnp.floataa )
snake_case : Dict =get_batched_dataset(_snake_case, args.batch_size, seed=_snake_case )
snake_case : Optional[Any] =0
for batch in tqdm(_snake_case, total=_snake_case, desc=f'''Running EPOCH-{epoch}''' ):
snake_case : Tuple =self.data_collator(_snake_case )
snake_case , snake_case , snake_case : Optional[Any] =self.train_step_fn(_snake_case, _snake_case, **_snake_case )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
snake_case : List[Any] =jax_utils.unreplicate(state.step )
snake_case : List[Any] =running_loss.item() / i
snake_case : Tuple =self.scheduler_fn(state_step - 1 )
snake_case : Optional[int] =self.evaluate(_snake_case, _snake_case )
snake_case : Tuple ={
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(_snake_case ) )
self.logger.log(_snake_case, commit=_snake_case )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''', state=_snake_case )
def __snake_case ( self : Optional[Any], _snake_case : List[str], _snake_case : Dict ):
'''simple docstring'''
snake_case : Union[str, Any] =get_batched_dataset(_snake_case, self.args.batch_size )
snake_case : Dict =len(_snake_case ) // self.args.batch_size
snake_case : List[str] =jnp.array(0, dtype=jnp.floataa )
snake_case : Optional[int] =0
for batch in tqdm(_snake_case, total=_snake_case, desc='''Evaluating ... ''' ):
snake_case : Dict =self.data_collator(_snake_case )
snake_case : str =self.val_step_fn(_snake_case, **_snake_case )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def __snake_case ( self : Union[str, Any], _snake_case : Optional[Any], _snake_case : Optional[Any] ):
'''simple docstring'''
snake_case : Any =jax_utils.unreplicate(_snake_case )
print(f'''SAVING CHECKPOINT IN {save_dir}''', end=''' ... ''' )
self.model_save_fn(_snake_case, params=state.params )
with open(os.path.join(_snake_case, '''opt_state.msgpack''' ), '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args, os.path.join(_snake_case, '''args.joblib''' ) )
joblib.dump(self.data_collator, os.path.join(_snake_case, '''data_collator.joblib''' ) )
with open(os.path.join(_snake_case, '''training_state.json''' ), '''w''' ) as f:
json.dump({'''step''': state.step.item()}, _snake_case )
print('''DONE''' )
def restore_checkpoint(save_dir, state):
    print(f'RESTORING CHECKPOINT FROM {save_dir}', end=' ... ')
    with open(os.path.join(save_dir, 'flax_model.msgpack'), 'rb') as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, 'opt_state.msgpack'), 'rb') as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, 'args.joblib'))
    data_collator = joblib.load(os.path.join(save_dir, 'data_collator.joblib'))
    with open(os.path.join(save_dir, 'training_state.json'), 'r') as f:
        training_state = json.load(f)
    step = training_state['step']
    print('DONE')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
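# Illustrative schedule shape from scheduler_fn/build_tx above (step counts
# are made up, not script defaults): linear warmup from init_lr to lr over
# warmup_steps, then linear decay towards 1e-7 for the remaining steps.
#
#   lr = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=2_000, num_train_steps=20_000)
#   lr(0), lr(2_000), lr(20_000)  # 0.0 -> 3e-5 -> ~1e-7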
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
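# Minimal usage sketch for this processor (the checkpoint name is an
# assumption -- the XLM-R tokenizer / CLIP image-processor pairing above
# matches the AltCLIP setup):
#
#   from PIL import Image
#
#   processor = AltCLIPProcessor.from_pretrained('BAAI/AltCLIP')
#   inputs = processor(
#       text=['a photo of a cat'], images=Image.open('cat.png'), return_tensors='pt'
#   )
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values
#   #    from the image processor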
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
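# Illustrative two-stage use of the pipelines re-exported here (requires
# accepting the DeepFloyd license on the Hub; the call signatures are
# simplified -- see the pipeline docstrings for the full prompt-embedding
# workflow):
#
#   from diffusers import IFPipeline, IFSuperResolutionPipeline
#
#   stage_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0')
#   stage_2 = IFSuperResolutionPipeline.from_pretrained('DeepFloyd/IF-II-L-v1.0')
#   image = stage_1('a photo of a red panda', output_type='pt').images
#   image = stage_2(image=image, prompt='a photo of a red panda').images[0]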
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether `module` was compiled with torch.compile()."""
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def __lowerCamelCase ( A__ , A__ = True ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCamelCase = is_compiled_module(__lowerCamelCase )
if is_compiled:
UpperCamelCase = model
UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase = model.module
if not keep_fpaa_wrapper:
UpperCamelCase = getattr(__lowerCamelCase , 'forward' )
UpperCamelCase = model.__dict__.pop('_original_forward' , __lowerCamelCase )
if original_forward is not None:
while hasattr(__lowerCamelCase , '__wrapped__' ):
UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
UpperCamelCase = forward
if getattr(__lowerCamelCase , '_converted_to_transformer_engine' , __lowerCamelCase ):
convert_model(__lowerCamelCase , to_transformer_engine=__lowerCamelCase )
if is_compiled:
UpperCamelCase = model
UpperCamelCase = compiled_model
return model
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
PartialState().wait_for_everyone()
def __lowerCamelCase ( A__ , A__ ) -> List[Any]:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__lowerCamelCase , __lowerCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__lowerCamelCase , __lowerCamelCase )
@contextmanager
def __lowerCamelCase ( **A__ ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
UpperCamelCase = str(__lowerCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowerCamelCase ( A__ ) -> Dict:
"""simple docstring"""
if not hasattr(__lowerCamelCase , '__qualname__' ) and not hasattr(__lowerCamelCase , '__name__' ):
UpperCamelCase = getattr(__lowerCamelCase , '__class__' , __lowerCamelCase )
if hasattr(__lowerCamelCase , '__qualname__' ):
return obj.__qualname__
if hasattr(__lowerCamelCase , '__name__' ):
return obj.__name__
return str(__lowerCamelCase )
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
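# Quick illustration of merge_dicts (not part of the original module):
#
#   >>> merge_dicts({'a': {'y': 3}, 'b': 4}, {'a': {'x': 1}, 'b': 2})
#   {'a': {'x': 1, 'y': 3}, 'b': 4}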
def is_port_in_use(port=None) -> bool:
    """Check if a port is in use on `localhost`."""
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    # A batched, differentiable, standard pinhole camera.
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='trunc'),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)
    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height,
            x_fov=self.x_fov, y_fov=self.y_fov, shape=self.shape,  # shape is required by the dataclass
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 344 | 0 |
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
 | 484 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
        'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VanForImageClassification',
        'VanModel',
        'VanPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
 | 484 | 1 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: do one swapping pass, then recurse on the unsorted prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
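# Hypothetical quick checks for the recursive bubble sort above.
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]
assert bubble_sort([]) == []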
| 60 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 508 | 0 |
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of a string into a zigzag grid whose height is
    the key, then read the grid row by row (rail-fence cipher)."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generate the zigzag template for the key, fill it with the ciphertext
    row by row, then read it back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(row)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
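# Hypothetical round trip with 4 rails, including key recovery by brute force.
ciphertext = encrypt("Hello, World!", 4)
assert decrypt(ciphertext, 4) == "Hello, World!"
assert bruteforce(ciphertext)[4] == "Hello, World!"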
 | 296 |
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # Convert a comma-separated string of numbers into a list.
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Dynamic programming: sum_value[i] is the best subarray sum ending at i,
        # rear[i] is the best subarray sum seen anywhere in arr[: i + 1].
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
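# For comparison, an equivalent single-pass Kadane's algorithm (assumed
# semantics: maximum sum over non-empty contiguous subarrays).
def kadane(numbers: list) -> int:
    best = current = numbers[0]
    for value in numbers[1:]:
        current = max(value, current + value)  # extend the current run or restart
        best = max(best, current)
    return best


assert kadane([1, -2, 3, 4, -1]) == SubArray("1,-2,3,4,-1").solve_sub_array() == 7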
| 296 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding

        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding

        elif text is not None or query_images is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
 | 670 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
 | 670 | 1 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
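# The point-transform above algebraically reduces to c + level; here is a NumPy
# sketch of the same operation with explicit clipping (an assumed equivalent,
# up to PIL's own clamping of 8-bit bands).
import numpy as np


def change_brightness_np(img: Image.Image, level: float) -> Image.Image:
    arr = np.asarray(img, dtype=np.float32) + level  # 128 + level + (c - 128) == c + level
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))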
| 720 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
| 471 | 0 |
'''simple docstring'''
import argparse
_lowercase = """docs/source/_static/js/custom.js"""
def A (__lowerCamelCase :List[Any] ):
with open(__lowerCamelCase , encoding="""utf-8""" , newline="""\n""" ) as f:
_lowerCAmelCase = f.readlines()
_lowerCAmelCase = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
_lowerCAmelCase = f'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f' "v{version}": "v{version}",\n'
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
_lowercase = parser.parse_args()
update_custom_js(args.version)
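# Hypothetical smoke test (not part of the original script): exercise the
# updater against a throwaway custom.js by rebinding the module-level JS_PATH.
def _smoke_test() -> None:
    import os
    import tempfile

    global JS_PATH
    sample = (
        'const stableVersion = "v4.0.0"\n'
        "const versionMapping = {\n"
        '    "v4.0.0": "v4.0.0",\n'
        "}\n"
    )
    with tempfile.TemporaryDirectory() as tmp:
        JS_PATH = os.path.join(tmp, "custom.js")
        with open(JS_PATH, "w", encoding="utf-8", newline="\n") as f:
            f.write(sample)
        update_custom_js("4.1.0")
        with open(JS_PATH, encoding="utf-8") as f:
            assert '"v4.1.0": "v4.1.0",' in f.read()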
 | 5 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model from the json configuration file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F"""Task {task} not supported.""")

    print(F"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print('''Used relative position embeddings:''', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 390 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
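# Quick self-check for the helpers above (hypothetical data): every flattened
# pixel should map to the index of its nearest palette color.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _pixels = _rng.random((4, 4, 3))  # a tiny fake image with values in [0, 1]
    _clusters = _rng.random((8, 3))  # 8 fake palette colors
    _ids = color_quantize(_pixels, _clusters)  # shape (16,), one cluster id per pixel
    assert _ids.shape == (16,) and _ids.max() < 8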
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BILINEAR,
        data_format=None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image,
        data_format=None,
    ):
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )

        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''')

        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
 | 709 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treat the curve as a collection of linear segments and sum the area of
    the trapezium shapes they form."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
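# Worked check: because of the abs() above, the routine estimates the *unsigned*
# area between curve and axis. For f(x) = x^3 + x^2 on [-5, 5] that limit is
# 344/3 + 198 = 938/3 ~= 312.67; the signed integral would be 250/3, since the
# odd x^3 term integrates to zero.
def _check_against_exact() -> None:
    estimate = trapezoidal_area(lambda x: x**3 + x**2, -5, 5, steps=100_000)
    assert abs(estimate - 938 / 3) < 1e-2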
| 236 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count how many words in words.txt have a triangular letter-value sum."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(wordfile_path) as f:
        words = f.readline()

    # Strip quotes, split on commas, then keep only triangular word values.
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
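# Worked example: "SKY" -> 19 + 11 + 25 = 55 and 55 = t_10 = 10 * 11 / 2,
# so "SKY" is a triangular word.
assert sum(ord(letter) - 64 for letter in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS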
| 206 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over a MODP group from RFC 3526."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
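# A minimal usage sketch (hypothetical): both parties must derive the same
# shared secret from each other's public keys.
if __name__ == "__main__":
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_public = alice.generate_public_key()
    bob_public = bob.generate_public_key()
    assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)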
| 466 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
"""simple docstring"""
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : Optional[int] = BeitForSemanticSegmentation(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = config_and_inputs
__lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
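# Common model tests (forward shapes, training, gradient checkpointing, weight initialization) over all BEiT heads.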
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = BeitModelTester(self)
__lowerCAmelCase : List[str] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[int]:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__lowerCAmelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_SCREAMING_SNAKE_CASE), BeitForMaskedImageModeling]:
continue
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.train()
__lowerCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(**_SCREAMING_SNAKE_CASE).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_SCREAMING_SNAKE_CASE), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE)
model.gradient_checkpointing_enable()
model.to(_SCREAMING_SNAKE_CASE)
model.train()
__lowerCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = _config_zero_init(_SCREAMING_SNAKE_CASE)
for model_class in self.all_model_classes:
__lowerCAmelCase : Dict = model_class(config=_SCREAMING_SNAKE_CASE)
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = BeitModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
def _lowercase ( ) -> Optional[Any]:
__lowerCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
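# Slow integration tests comparing pretrained BEiT checkpoints against reference logit slices.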
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = self.default_image_processor
__lowerCAmelCase : Union[str, Any] = prepare_img()
__lowerCAmelCase : int = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt").pixel_values.to(_SCREAMING_SNAKE_CASE)
# prepare bool_masked_pos
__lowerCAmelCase : Optional[int] = torch.ones((1, 196) , dtype=torch.bool).to(_SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__lowerCAmelCase : Dict = model(pixel_values=_SCREAMING_SNAKE_CASE , bool_masked_pos=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = outputs.logits
# verify the logits
__lowerCAmelCase : Union[str, Any] = torch.Size((1, 196, 8192))
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(_SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-2))
@slow
def _SCREAMING_SNAKE_CASE ( self: int) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.default_image_processor
__lowerCAmelCase : List[str] = prepare_img()
__lowerCAmelCase : Dict = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt").to(_SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = outputs.logits
# verify the logits
__lowerCAmelCase : List[Any] = torch.Size((1, 1000))
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = torch.tensor([-1.2385, -1.0987, -1.0108]).to(_SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4))
__lowerCAmelCase : int = 281
self.assertEqual(logits.argmax(-1).item() , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.default_image_processor
__lowerCAmelCase : Tuple = prepare_img()
__lowerCAmelCase : Optional[int] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt").to(_SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = outputs.logits
# verify the logits
__lowerCAmelCase : Dict = torch.Size((1, 2_1841))
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = torch.tensor([1.6881, -0.2787, 0.5901]).to(_SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4))
__lowerCAmelCase : Tuple = 2396
self.assertEqual(logits.argmax(-1).item() , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
__lowerCAmelCase : List[str] = model.to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = BeitImageProcessor(do_resize=_SCREAMING_SNAKE_CASE , size=640 , do_center_crop=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test")
__lowerCAmelCase : List[Any] = Image.open(ds[0]["file"])
__lowerCAmelCase : Dict = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt").to(_SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = outputs.logits
# verify the logits
__lowerCAmelCase : Union[str, Any] = torch.Size((1, 150, 160, 160))
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = version.parse(PIL.__version__) < version.parse("9.0.0")
if is_pillow_less_than_a:
__lowerCAmelCase : str = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : List[Any] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4))
@slow
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
__lowerCAmelCase : int = model.to(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = BeitImageProcessor(do_resize=_SCREAMING_SNAKE_CASE , size=640 , do_center_crop=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test")
__lowerCAmelCase : Dict = Image.open(ds[0]["file"])
__lowerCAmelCase : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt").to(_SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__lowerCAmelCase : List[Any] = model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = outputs.logits.detach().cpu()
__lowerCAmelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)])
__lowerCAmelCase : int = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE)
| 615 |
"""simple docstring"""
import string
from math import logaa
def _lowercase ( __snake_case ,__snake_case ) -> int:
__lowerCAmelCase : int = document.translate(
str.maketrans("" ,"" ,string.punctuation ) ).replace("\n" ,"" )
__lowerCAmelCase : Dict = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
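# Document frequency: treat each newline-separated line of the corpus as a document and
# return (number of documents containing the term, total number of documents).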
def _lowercase ( __snake_case ,__snake_case ) -> tuple[int, int]:
__lowerCAmelCase : Optional[Any] = corpus.lower().translate(
str.maketrans("" ,"" ,string.punctuation ) ) # strip all punctuation and replace it with ''
__lowerCAmelCase : List[str] = corpus_without_punctuation.split("\n" )
__lowerCAmelCase : str = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__snake_case ))
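# Inverse document frequency: round(log10(n / df), 3), or the smoothed variant
# round(1 + log10(n / (1 + df)), 3); e.g. n=10, df=3 gives 0.523.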
def _lowercase ( __snake_case ,__snake_case ,__snake_case=False ) -> float:
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) ,3 )
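# tf-idf score: term frequency times inverse document frequency, rounded to 3 places,
# e.g. tf=2 with idf=0.523 gives 1.046.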
def _lowercase ( __snake_case ,__snake_case ) -> float:
    return round(tf * idf ,3 )
| 615 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'timm_backbone'
def __init__( self : List[Any] , _A : str=None , _A : List[str]=3 , _A : Dict=True , _A : Optional[Any]=True , _A : Tuple=None , **_A : Dict , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : List[Any] = backbone
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : str = features_only
UpperCAmelCase__ : List[str] = use_pretrained_backbone
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Union[str, Any] = out_indices if out_indices is not None else (-1,)
| 75 |
import math
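# Return the n-th Proth number (k * 2**m + 1 with odd k < 2**m): 3, 5, 9, 13, 17, 25, ...
# e.g. the 6th Proth number is 25.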
def UpperCAmelCase ( UpperCAmelCase )-> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase ,UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCAmelCase )
if number < 1:
SCREAMING_SNAKE_CASE_ = f'''Input value of [number={number}] must be > 0'''
raise ValueError(UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
SCREAMING_SNAKE_CASE_ = int(math.log(number // 3 ,2 ) ) + 2
SCREAMING_SNAKE_CASE_ = [3, 5]
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
for block in range(1 ,UpperCAmelCase ):
for _ in range(UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
A_ = 0
try:
A_ = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}')
| 393 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase ( lowerCAmelCase__ : List[Any] ) -> str:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int ) -> Union[str, Any]:
from transformers.testing_utils import pytest_terminal_summary_main
__a = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowerCAmelCase__ , id=lowerCAmelCase__ )
| 65 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : List[str] = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 233 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase : Union[str, Any] = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 210 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Optional[Any] = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
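# Configuration for the Autoformer time-series model: the usual encoder-decoder fields plus
# decomposition-specific settings (label_length, moving_average, autocorrelation_factor).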
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = "autoformer"
_A = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Optional[int] , A_ : Optional[int] = None , A_ : Optional[int] = None , A_ : str = "student_t" , A_ : str = "nll" , A_ : int = 1 , A_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , A_ : bool = True , A_ : int = 0 , A_ : int = 0 , A_ : int = 0 , A_ : int = 0 , A_ : Optional[List[int]] = None , A_ : Optional[List[int]] = None , A_ : int = 64 , A_ : int = 2 , A_ : int = 2 , A_ : int = 2 , A_ : int = 2 , A_ : int = 32 , A_ : int = 32 , A_ : str = "gelu" , A_ : float = 0.1 , A_ : float = 0.1 , A_ : float = 0.1 , A_ : float = 0.1 , A_ : float = 0.1 , A_ : int = 1_00 , A_ : float = 0.02 , A_ : bool = True , A_ : Optional[Any]=True , A_ : int = 10 , A_ : int = 25 , A_ : int = 3 , **A_ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_: List[str] = prediction_length
lowerCamelCase_: Tuple = context_length if context_length is not None else prediction_length
lowerCamelCase_: Optional[int] = distribution_output
lowerCamelCase_: Union[str, Any] = loss
lowerCamelCase_: Optional[Any] = input_size
lowerCamelCase_: Tuple = num_time_features
lowerCamelCase_: Optional[Any] = lags_sequence
lowerCamelCase_: Union[str, Any] = scaling
lowerCamelCase_: List[str] = num_dynamic_real_features
lowerCamelCase_: Dict = num_static_real_features
lowerCamelCase_: Dict = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase_: List[Any] = cardinality
else:
lowerCamelCase_: Tuple = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase_: str = embedding_dimension
else:
lowerCamelCase_: List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase_: Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase_: Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase_: List[str] = d_model
lowerCamelCase_: Union[str, Any] = encoder_attention_heads
lowerCamelCase_: Optional[Any] = decoder_attention_heads
lowerCamelCase_: Union[str, Any] = encoder_ffn_dim
lowerCamelCase_: Optional[Any] = decoder_ffn_dim
lowerCamelCase_: Optional[Any] = encoder_layers
lowerCamelCase_: Optional[Any] = decoder_layers
lowerCamelCase_: Optional[int] = dropout
lowerCamelCase_: Any = attention_dropout
lowerCamelCase_: str = activation_dropout
lowerCamelCase_: Optional[int] = encoder_layerdrop
lowerCamelCase_: List[Any] = decoder_layerdrop
lowerCamelCase_: str = activation_function
lowerCamelCase_: Tuple = init_std
lowerCamelCase_: int = use_cache
# Autoformer
lowerCamelCase_: int = label_length
lowerCamelCase_: Any = moving_average
lowerCamelCase_: Tuple = autocorrelation_factor
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 703 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 584 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
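# Fast CPU tests for the LDM3D text-to-(rgb, depth) pipeline built from tiny random components.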
class lowerCamelCase ( unittest.TestCase ):
UpperCamelCase_ : str = StableDiffusionLDMaDPipeline
UpperCamelCase_ : int = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self :Optional[int] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(lowercase )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self :Union[str, Any] , lowercase :List[Any] , lowercase :int=0 ) -> Optional[int]:
"""simple docstring"""
if str(lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowercase )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowercase ).manual_seed(lowercase )
SCREAMING_SNAKE_CASE = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self :Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe(**lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.rgb, output.depth
SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
SCREAMING_SNAKE_CASE = np.array(
[0.37_33_81_76, 0.7_02_47, 0.74_20_31_93, 0.51_64_36_04, 0.58_25_67_93, 0.60_93_21_36, 0.4_18_10_95, 0.48_35_58_77, 0.46_53_52_62] )
SCREAMING_SNAKE_CASE = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def snake_case__ ( self :Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowercase )
SCREAMING_SNAKE_CASE = 3 * [inputs['''prompt''']]
# forward
SCREAMING_SNAKE_CASE = ldmad_pipe(**lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.rgb, output.depth
SCREAMING_SNAKE_CASE = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = depth_slice_a[0, -3:, -1]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowercase )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('''prompt''' )]
SCREAMING_SNAKE_CASE = ldmad_pipe.tokenizer(
lowercase , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE = text_inputs['''input_ids'''].to(lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe.text_encoder(lowercase )[0]
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = ldmad_pipe(**lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.rgb, output.depth
SCREAMING_SNAKE_CASE = rgb_slice_a[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def snake_case__ ( self :int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=lowercase )
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline(**lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowercase )
SCREAMING_SNAKE_CASE = '''french fries'''
SCREAMING_SNAKE_CASE = ldmad_pipe(**lowercase , negative_prompt=lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.rgb, output.depth
SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
SCREAMING_SNAKE_CASE = np.array(
[0.3_70_44, 0.71_81_15_03, 0.7_22_32_51, 0.48_60_36_75, 0.5_63_83_91, 0.6_36_49_48, 0.42_83_37_04, 0.4_90_13_15, 0.47_92_62_17] )
SCREAMING_SNAKE_CASE = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
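# Slow GPU tests against the pretrained Intel/ldm3d checkpoints.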
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def snake_case__ ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self :Any , lowercase :Any , lowercase :int="cpu" , lowercase :Union[str, Any]=torch.floataa , lowercase :Optional[int]=0 ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=lowercase ).manual_seed(lowercase )
SCREAMING_SNAKE_CASE = np.random.RandomState(lowercase ).standard_normal((1, 4, 6_4, 6_4) )
SCREAMING_SNAKE_CASE = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
SCREAMING_SNAKE_CASE = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
SCREAMING_SNAKE_CASE = ldmad_pipe.to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
SCREAMING_SNAKE_CASE = self.get_inputs(lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe(**lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.rgb, output.depth
SCREAMING_SNAKE_CASE = rgb[0, -3:, -3:, -1].flatten()
SCREAMING_SNAKE_CASE = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
SCREAMING_SNAKE_CASE = np.array(
[0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
SCREAMING_SNAKE_CASE = np.array(
[0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def snake_case__ ( self :List[Any] ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self :Any , lowercase :Any , lowercase :Optional[Any]="cpu" , lowercase :str=torch.floataa , lowercase :Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=lowercase ).manual_seed(lowercase )
SCREAMING_SNAKE_CASE = np.random.RandomState(lowercase ).standard_normal((1, 4, 6_4, 6_4) )
SCREAMING_SNAKE_CASE = torch.from_numpy(lowercase ).to(device=lowercase , dtype=lowercase )
SCREAMING_SNAKE_CASE = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self :Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
SCREAMING_SNAKE_CASE = self.get_inputs(lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe(**lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.rgb, output.depth
SCREAMING_SNAKE_CASE = 0.49_55_86
SCREAMING_SNAKE_CASE = 0.33_79_55_15
SCREAMING_SNAKE_CASE = 1_12.4_85_18
SCREAMING_SNAKE_CASE = 98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def snake_case__ ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(lowercase )
ldmad_pipe.set_progress_bar_config(disable=lowercase )
SCREAMING_SNAKE_CASE = self.get_inputs(lowercase )
SCREAMING_SNAKE_CASE = ldmad_pipe(**lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.rgb, output.depth
SCREAMING_SNAKE_CASE = 0.4_19_41_27
SCREAMING_SNAKE_CASE = 0.35_37_55_86
SCREAMING_SNAKE_CASE = 0.5_63_85_02
SCREAMING_SNAKE_CASE = 0.34_68_61_03
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 201 |
import json
import os
import torch
from diffusers import UNetaDModel
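# Convert diffuser temporal-UNet and value-function checkpoints for hopper-medium-v2 into
# the diffusers format, saving weights and configs under hub/.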
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def a ( a ) ->List[Any]:
'''simple docstring'''
if hor == 128:
SCREAMING_SNAKE_CASE = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
SCREAMING_SNAKE_CASE = (32, 128, 256)
SCREAMING_SNAKE_CASE = ('''UpResnetBlock1D''', '''UpResnetBlock1D''')
elif hor == 32:
SCREAMING_SNAKE_CASE = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
SCREAMING_SNAKE_CASE = (32, 64, 128, 256)
SCREAMING_SNAKE_CASE = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''')
SCREAMING_SNAKE_CASE = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
SCREAMING_SNAKE_CASE = model.state_dict()
SCREAMING_SNAKE_CASE = {
'''down_block_types''': down_block_types,
'''block_out_channels''': block_out_channels,
'''up_block_types''': up_block_types,
'''layers_per_block''': 1,
'''use_timestep_embedding''': True,
'''out_block_type''': '''OutConv1DBlock''',
'''norm_num_groups''': 8,
'''downsample_each_block''': False,
'''in_channels''': 14,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''sample_size''': 6_5536,
'''mid_block_type''': '''MidResTemporalBlock1D''',
'''act_fn''': '''mish''',
}
SCREAMING_SNAKE_CASE = UNetaDModel(**a )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(a )
hf_value_function.load_state_dict(a )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , '''w''' ) as f:
json.dump(a , a )
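# Value-function variant: every block downsamples and the output head predicts a scalar value.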
def a ( ) ->Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
'''in_channels''': 14,
'''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''),
'''up_block_types''': (),
'''out_block_type''': '''ValueFunction''',
'''mid_block_type''': '''ValueFunctionMidBlock1D''',
'''block_out_channels''': (32, 64, 128, 256),
'''layers_per_block''': 1,
'''downsample_each_block''': True,
'''sample_size''': 6_5536,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''use_timestep_embedding''': True,
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''norm_num_groups''': 8,
'''act_fn''': '''mish''',
}
SCREAMING_SNAKE_CASE = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
SCREAMING_SNAKE_CASE = model
SCREAMING_SNAKE_CASE = UNetaDModel(**a )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(a )
hf_value_function.load_state_dict(a )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(a , a )
if __name__ == "__main__":
unet(3_2)
# unet(128)
    value_function()
| 201 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 714 |
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
    # BFS over the residual graph: returns True if the sink is reachable from the source,
    # recording the augmenting path in parent[].
_UpperCamelCase = [False] * len(__snake_case )
_UpperCamelCase = []
queue.append(__snake_case )
_UpperCamelCase = True
while queue:
_UpperCamelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__snake_case )
_UpperCamelCase = True
_UpperCamelCase = u
return visited[t]
def _snake_case ( __snake_case , __snake_case , __snake_case ):
    # parent[] is filled by BFS to store the augmenting path
_UpperCamelCase = [-1] * (len(__snake_case ))
_UpperCamelCase = 0
while bfs(__snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = float('''Inf''' )
_UpperCamelCase = sink
while s != source:
            # Find the minimum residual capacity along the augmenting path
_UpperCamelCase = min(__snake_case , graph[parent[s]][s] )
_UpperCamelCase = parent[s]
max_flow += path_flow
_UpperCamelCase = sink
while v != source:
_UpperCamelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase = parent[v]
return max_flow
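# Example 6-node capacity matrix; the maximum flow from source 0 to sink 5 is 23.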
_lowerCAmelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_lowerCAmelCase, _lowerCAmelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
| 71 | 0 |
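# Simplified DES (S-DES): 10-bit key schedule and two Feistel rounds over an 8-bit block.
# apply_table (below) applies a 1-indexed permutation/selection table to a bit string.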
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = ''
for i in table:
res += inp[i - 1]
return res
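# Circular left shift of a bit string by one position (used in the key schedule).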
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
return data[1:] + data[0]
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = ''
for i in range(len(lowerCAmelCase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
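# S-box lookup: the first and last bits select the row, the middle two bits the column.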
def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = int('0b' + data[0] + data[-1], 2 )
__lowerCAmelCase = int('0b' + data[1:3], 2 )
return bin(s[row][col] )[2:]
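# One Feistel round: expand/permute the right half, XOR with the round key, substitute
# the halves through the S-boxes, permute with P4, and XOR the result into the left half.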
def a_ ( lowerCAmelCase_ : Any, lowerCAmelCase_ : Any, lowerCAmelCase_ : Tuple, lowerCAmelCase_ : List[Any], lowerCAmelCase_ : Any ):
__lowerCAmelCase = message[:4]
__lowerCAmelCase = message[4:]
__lowerCAmelCase = apply_table(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = xor(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = apply_sbox(lowerCAmelCase_, temp[:4] ) # noqa: E741
__lowerCAmelCase = apply_sbox(lowerCAmelCase_, temp[4:] )
__lowerCAmelCase = '0' * (2 - len(lowerCAmelCase_ )) + l # noqa: E741
__lowerCAmelCase = '0' * (2 - len(lowerCAmelCase_ )) + r
__lowerCAmelCase = apply_table(l + r, lowerCAmelCase_ )
__lowerCAmelCase = xor(lowerCAmelCase_, lowerCAmelCase_ )
return temp + right
if __name__ == "__main__":
_snake_case : str = input('Enter 10 bit key: ')
_snake_case : Any = input('Enter 8 bit message: ')
_snake_case : Tuple = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case : List[Any] = [2, 4, 3, 1]
_snake_case : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case : Union[str, Any] = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case : Optional[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case : Optional[Any] = apply_table(key, paa_table)
_snake_case : Any = temp[:5]
_snake_case : Dict = temp[5:]
_snake_case : Dict = left_shift(left)
_snake_case : Any = left_shift(right)
_snake_case : Optional[int] = apply_table(left + right, pa_table)
_snake_case : Optional[Any] = left_shift(left)
_snake_case : Any = left_shift(right)
_snake_case : Tuple = left_shift(left)
_snake_case : List[str] = left_shift(right)
_snake_case : Optional[Any] = apply_table(left + right, pa_table)
# encryption
_snake_case : Any = apply_table(message, IP)
_snake_case : Optional[Any] = function(expansion, sa, sa, keya, temp)
_snake_case : Optional[int] = temp[4:] + temp[:4]
_snake_case : Optional[Any] = function(expansion, sa, sa, keya, temp)
_snake_case : str = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
_snake_case : Tuple = apply_table(CT, IP)
_snake_case : Dict = function(expansion, sa, sa, keya, temp)
_snake_case : Optional[int] = temp[4:] + temp[:4]
_snake_case : Any = function(expansion, sa, sa, keya, temp)
_snake_case : Optional[Any] = apply_table(temp, IP_inv)
print('Plain text after decrypting is:', PT)
| 53 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 451 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
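# Model tester for Flax GPT-J: builds a small rotary-embedding config and checks that
# cached (incremental) decoding matches the full forward pass.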
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=1_4 , SCREAMING_SNAKE_CASE : Any=7 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : List[str]=9_9 , SCREAMING_SNAKE_CASE : Tuple=3_2 , SCREAMING_SNAKE_CASE : int=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Tuple=3_7 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : int=5_1_2 , SCREAMING_SNAKE_CASE : int=0.0_2 , ) -> Any:
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = rotary_dim
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = None
lowerCAmelCase = vocab_size - 1
lowerCAmelCase = vocab_size - 1
lowerCAmelCase = vocab_size - 1
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        # Pre-allocate the cache, decode the prefix and then the last token in two calls;
        # the cached forward pass must match an uncached full forward pass.
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        # Pad the attention mask up to the cache length with zeros.
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
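# To run only the cache-consistency checks above (the test-file path is an
# assumption about where this module lives in the repository):
#   python -m pytest tests/models/gptj/test_modeling_flax_gptj.py -k "use_cache_forward"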
"""Convert a Dance Diffusion (audio-diffusion) checkpoint into a diffusers DanceDiffusionPipeline."""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean signal and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
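# Example invocation (the script file name is an assumption; --model_path also accepts
# one of the official names from MODELS_MAP, in which case the checkpoint is downloaded):
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers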
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
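# Hypothetical usage sketch: a concrete tool test mixes this class into a TestCase and
# assigns `self.tool` in setUp, e.g. (tool name chosen for illustration only):
#
#   from transformers import load_tool
#
#   class TranslationToolTester(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool("translation")
#           self.tool.setup()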
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
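# Example invocation (flag names mirror the dataclass fields referenced above via
# `args.*`; the script name and checkpoint names are placeholders):
#   python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot --push_to_hub True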
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
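# To run only the fast test above and skip the slow integration test
# (the repository-relative path is an assumption):
#   python -m pytest tests/pipelines/karras_ve/test_karras_ve.py -k "FastTests"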
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: each pass bubbles the largest remaining element to the
    end of the considered prefix, then recurses on a prefix one element shorter.

    >>> bubble_sort([5, 1, 4, 2, 8])
    [1, 2, 4, 5, 8]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
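

# For comparison, a minimal iterative formulation of the same algorithm
# (added as an illustration; not part of the original module):
def bubble_sort_iterative(collection: list) -> list:
    """
    >>> bubble_sort_iterative([5, 1, 4, 2, 8])
    [1, 2, 4, 5, 8]
    """
    for end in range(len(collection), 1, -1):
        swapped = False
        for i in range(end - 1):
            if collection[i] > collection[i + 1]:
                collection[i], collection[i + 1] = collection[i + 1], collection[i]
                swapped = True
        if not swapped:  # already sorted; stop early
            break
    return collection
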
if __name__ == "__main__":
import doctest
doctest.testmod()
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
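# Minimal usage sketch (the default checkpoint paths are the ones hard-coded above;
# `x` is assumed to be an image batch tensor in the range the VQGAN expects):
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device)
#   xrec = reconstruct_with_vqgan(x, vqgan)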
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])

            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model=base_model)] = val

    return orig_state_dict
# We will verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
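# Example invocation (the script file name and local paths are placeholders):
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small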
"""simple docstring"""
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
A__ : int =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A__ : List[str] =1
if upper_limit > 0:
A__ : str =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(UpperCamelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
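

# Illustrative cross-check against the closed form C(n) = binom(2n, n) / (n + 1)
# (this helper is an addition for clarity; it is not part of the original script):
def catalan_closed_form(n: int) -> int:
    """
    >>> [catalan_closed_form(i) for i in range(6)]
    [1, 1, 2, 5, 14, 42]
    """
    from math import comb

    return comb(2 * n, n) // (n + 1)
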
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]

            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
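# Note: the CUDAExecutionProvider options above cap the memory arena at ~15 GB.
# On smaller GPUs, shrink "gpu_mem_limit" accordingly, e.g. (illustrative value):
#   ("CUDAExecutionProvider", {"gpu_mem_limit": "8000000000", "arena_extend_strategy": "kSameAsRequested"})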
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __snake_case ( ) -> Any:
"""simple docstring"""
A = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=UpperCamelCase__ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=UpperCamelCase__ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=UpperCamelCase__ )
return parser.parse_args()
def __snake_case ( ) -> str:
"""simple docstring"""
A = parse_args()
# Import training_script as a module.
A = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
A = script_fpath.stem
A = importlib.import_module(UpperCamelCase__ )
# Patch sys.argv
A = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
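# Example (launches train.py on 8 TPU cores; the launcher's file name and the
# training script are placeholders):
#   python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5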
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
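# With the lazy structure above, `from transformers.models.graphormer import GraphormerModel`
# resolves the torch-backed submodule only on first attribute access, keeping a bare
# `import transformers` cheap.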
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def snake_case ( self : str ):
pass
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : Dict ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Dict = [*signature.parameters.keys()]
lowercase__ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Any ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = SwiftFormerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@unittest.skip(reason="SwiftFormer does not output attentions" )
    def test_attention_outputs( self ):
pass
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
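    # Worked example of the shape expectation above (illustrative, assuming SwiftFormer-XS-style
    # tester values of image_size=224 and four embed_dims): stages 0-1 yield 56x56 feature maps
    # (224 // 4), stages 2-3 yield 28x28, halving once more after every two blocks.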
    def test_initialization( self ):
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : Optional[Any] ):
pass
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
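    # (Editorial note: this follows the usual HF integration-test pattern -- compare a small
    #  pre-recorded slice of the logits against the released checkpoint with a loose absolute
    #  tolerance rather than checking all 1000 class scores.)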
| 714 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
    """simple docstring"""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key( name ):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers" , "encoder.stages" )
    if "downsample.proj" in name:
        name = name.replace("downsample.proj" , "downsample.projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f" , "modulation.projection_in" )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h" , "modulation.projection_context" )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj" , "modulation.projection_out" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "focalnet." + name
    return name
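# Example of the renaming above (illustrative input/output pair):
#   "layers.0.blocks.1.modulation.f.weight"
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"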
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: " , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
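    # Illustrative invocation (the script filename and output path are assumptions, not
    # pinned by this file):
    #   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny-hf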
| 81 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
    def __init__( self , components: Collection[float] | None = None ) -> None:
        """simple docstring"""
        if components is None:
            components = []
        self.__components = list(components )
    def __len__( self ) -> int:
        """simple docstring"""
        return len(self.__components )
    def __str__( self ) -> str:
        """simple docstring"""
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self , other: Vector ) -> Vector:
        """simple docstring"""
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('''must have the same size''' )
    def __sub__( self , other: Vector ) -> Vector:
        """simple docstring"""
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else: # error case
            raise Exception('''must have the same size''' )
@overload
def __mul__( self : List[str] , _A : float ) -> Vector:
"""simple docstring"""
...
@overload
def __mul__( self : List[Any] , _A : Vector ) -> float:
"""simple docstring"""
...
    def __mul__( self , other: float | Vector ) -> float | Vector:
        """simple docstring"""
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else: # error case
            raise Exception('''invalid operand!''' )
    def copy( self ) -> Vector:
        """simple docstring"""
        return Vector(self.__components )
    def component( self , i: int ) -> float:
        """simple docstring"""
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('''index out of range''' )
    def change_component( self , pos: int , value: float ) -> None:
        """simple docstring"""
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ) -> float:
        """simple docstring"""
        if len(self.__components ) == 0:
            raise Exception('''Vector is empty''' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other: Vector , deg: bool = False ) -> float:
        """simple docstring"""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
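    # Worked example of angle() above (illustrative values): for v=(1,0) and w=(0,1),
    # v*w == 0 and both euclidean lengths are 1, so the angle is acos(0) = pi/2
    # (90.0 when called with deg=True).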
def zero_vector( dimension: int ) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension: int , pos: int ) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar: float , x: Vector , y: Vector ) -> Vector:
    '''simple docstring'''
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector( n: int , a: int , b: int ) -> Vector:
    '''simple docstring'''
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
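# Worked example of axpy above (illustrative values): with x = Vector([1.0, 2.0, 3.0]) and
# y = Vector([1.0, 1.0, 1.0]), axpy(2, x, y) returns 2*x + y, i.e. (3.0,5.0,7.0).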
class Matrix :
    def __init__( self , matrix: list[list[float]] , w: int , h: int ) -> None:
        """simple docstring"""
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__( self ) -> str:
        """simple docstring"""
        ans = ""
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans
    def __add__( self , other: Matrix ) -> Matrix:
        """simple docstring"""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('''matrices must have the same dimension!''' )
    def __sub__( self , other: Matrix ) -> Matrix:
        """simple docstring"""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('''matrices must have the same dimension!''' )
    @overload
    def __mul__( self , other: float ) -> Matrix:
        """simple docstring"""
        ...
    @overload
    def __mul__( self , other: Vector ) -> Vector:
        """simple docstring"""
        ...
    def __mul__( self , other: float | Vector ) -> Vector | Matrix:
        """simple docstring"""
        if isinstance(other , Vector ): # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    '''vector must have the same size as the '''
                    '''number of columns of the matrix!''' )
        elif isinstance(other , (int, float) ): # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
return None
    def height( self ) -> int:
        """simple docstring"""
        return self.__height
    def width( self ) -> int:
        """simple docstring"""
        return self.__width
    def component( self , x: int , y: int ) -> float:
        """simple docstring"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('''component: indices out of bounds''' )
    def change_component( self , x: int , y: int , value: float ) -> None:
        """simple docstring"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('''change_component: indices out of bounds''' )
    def minor( self , x: int , y: int ) -> float:
        """simple docstring"""
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x: int , y: int ) -> float:
        """simple docstring"""
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('''Indices out of bounds''' )
    def determinant( self ) -> float:
        """simple docstring"""
        if self.__height != self.__width:
            raise Exception('''Matrix is not square''' )
        if self.__height < 1:
            raise Exception('''Matrix has no element''' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
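    # (The determinant above uses Laplace/cofactor expansion along row 0, which is O(n!)
    #  time -- fine for the small matrices this module targets, but illustrative rather
    #  than numerically efficient; LU decomposition would be O(n^3).)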
def square_zero_matrix( n: int ) -> Matrix:
    '''simple docstring'''
    ans: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix( width: int , height: int , a: int , b: int ) -> Matrix:
    '''simple docstring'''
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height ) | 217 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
a_ = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    original_checkpoint = torch.load(path_to_checkpoints , lambda storage , loc: storage)
    original = AbsSummarizer(config , torch.device("cpu") , original_checkpoint)
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = None
    clss = None
    mask_src = None
    mask_tgt = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        src , tgt , segs , clss , mask_src)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1E-3)
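    # Note on the tolerance: torch.allclose passes iff |converted - original| <=
    # atol + rtol * |original| elementwise, i.e. 1e-3 + 1e-5 * |original| with the
    # default rtol -- hence the "equal up to 1e-3" log message below.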
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
) | 25 | 0 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    """simple docstring"""
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
return 2.0 * image - 1.0
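# Worked example of the preprocessing above (illustrative): a 129x65 PIL image is snapped
# down to 128x64 (the nearest multiples of 32), converted to an NCHW float32 tensor, and
# rescaled from [0, 255] to [-1, 1].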
class LDMSuperResolutionPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , vqvae: VQModel , unet: UNet2DModel , scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] = None , batch_size: Optional[int] = 1 , num_inference_steps: Optional[int] = 100 , eta: Optional[float] = 0.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        '''simple docstring'''
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}" )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
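# Illustrative usage sketch (the checkpoint id below is an assumption, not pinned by this file):
#   import PIL.Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")
#   upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]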
| 28 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {"""num_train_timesteps""": 1_000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
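    # (IPNDM is a multistep scheduler: `step` consumes a short history of past model
    #  residuals stored on `scheduler.ets`, which is what the dummy_past_residuals
    #  staged above are mimicking.)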
    def test_from_save_pretrained( self ):
        '''simple docstring'''
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        kwargs.update(forward_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 28 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig ( PretrainedConfig ):
    model_type = 'fnet'
    def __init__( self , vocab_size=32_000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
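# Minimal usage sketch: FNetConfig() picks up the defaults above, so e.g.
#   FNetConfig().hidden_size == 768, and FNetConfig(vocab_size=50_000) overrides one field.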
| 532 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__SCREAMING_SNAKE_CASE = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__SCREAMING_SNAKE_CASE = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__SCREAMING_SNAKE_CASE = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute( self , predictions , references , char_order: int = CHRF.CHAR_ORDER , word_order: int = CHRF.WORD_ORDER , beta: int = CHRF.BETA , lowercase: bool = False , whitespace: bool = False , eps_smoothing: bool = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
} | 17 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 17 | 1 |
def present_value( discount_rate: float , cash_flows: list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
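# Worked example (illustrative values, not from the original file): with a 10% rate,
#   present_value(0.10, [-1000, 600, 600]) = -1000 + 600/1.1 + 600/1.1**2
#                                          = -1000 + 545.45... + 495.86... -> 41.32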
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
    '''simple docstring'''
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
    '''simple docstring'''
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
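# (Editorial note: LSH attention ties queries and keys into the single `query_key`
#  projection handled above, so an LSH layer carries 3 weight arrays; local attention
#  keeps separate query/key projections and carries 4 -- that is what the
#  `len(...) < 4` branch in set_block_weights_in_torch below distinguishes.)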
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
    '''simple docstring'''
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    '''simple docstring'''
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
    '''simple docstring'''
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , """rb""" ) as f:
        model_weights = pickle.load(f )["""weights"""]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 334 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
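# (EN_CODE / RO_CODE are mBART's language-code token ids -- en_XX and ro_RO -- appended
#  after the regular SentencePiece vocabulary once the fairseq offset handled by the
#  tokenizer is taken into account.)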
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #              ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
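# A minimal usage sketch (added for illustration; `attribute_map` above lets
# the generic config names resolve to the GPT-2 style ones):
#
#     config = ImageGPTConfig()
#     config.hidden_size         # resolves to config.n_embd == 512
#     config.num_hidden_layers   # resolves to config.n_layer == 24
#     config.vocab_size          # 513 == 512 color clusters + 1 sos token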
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs | 6 |
snake_case_ : List[Any] = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesised infix expression using Dijkstra's
    two-stack algorithm.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop an operator and its two operands, apply, push result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
        # RULE 3: left parentheses (and spaces) are ignored

    # RULE 5: the single remaining operand is the value of the expression
    return operand_stack.peek()
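# A worked trace for "(5 + ((4 * 2) * (2 + 3)))" (added for illustration):
#   ")" closes (4 * 2)   -> push 8
#   ")" closes (2 + 3)   -> push 5
#   ")" closes (8 * 5)   -> push 40
#   ")" closes (5 + 40)  -> push 45, the final answer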
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 488 | 0 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex
    number constituted by this x-y-pair diverges. Members of the Mandelbrot
    set do not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
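# Two quick sanity checks (added for illustration): the origin never
# diverges, so get_distance(0, 0, 50) == 1.0, while (1, 1) escapes on the
# very first step, so get_distance(1, 1, 50) == 0.0.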
def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black-and-white coloring: members of the Mandelbrot set (distance == 1)
    are black, everything else is white.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coding that takes the relative distance into account: members of
    the Mandelbrot set are black, the rest is colored by hue.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 350 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)
    # a property because chunk_length_s may be changed on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # a property because chunk_length_s and overlap may be changed on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
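# A minimal usage sketch (added for illustration; the numbers follow from the
# defaults above):
#
#     config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#     config.chunk_length  # 1.0 s * 24_000 Hz = 24_000 samples
#     config.chunk_stride  # max(1, int(0.99 * 24_000)) = 23_760 samples
#     config.frame_rate    # ceil(24_000 / (8 * 5 * 4 * 2)) = 75 frames/s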
| 350 | 1 |