import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config_from_model = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config_from_model, "foo")  # no new kwargs should be initialized if from config

    def test_default_values(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
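# How the API under test is typically used (a brief sketch; "gpt2" and the
# sampling values are illustrative choices, not taken from the tests above):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
#
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     generation_config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=20)
#     inputs = tokenizer("Hello", return_tensors="pt")
#     outputs = model.generate(**inputs, generation_config=generation_config)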
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculates the built-in voltage of a p-n junction diode:
    V_bi = (k*T/q) * ln(N_d * N_a / n_i**2)
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
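# Worked example (a hedged sketch; the doping levels are assumed, illustrative
# values in cm^-3 for a silicon junction at T = 300 K, not part of the file):
#
#     >>> round(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10), 2)
#     0.81
#
# i.e. (k*T/q) * ln(N_d * N_a / n_i**2) comes out to roughly 0.81 V.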
"""Convert SEW checkpoint."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path (e.g. "encoder.layers.0.attention.k_proj").
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the model's weights to the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
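# Typical invocation of the script above (the script name and paths are
# assumptions for illustration, not part of the original file):
#
#     python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./sew_checkpoint.pt \
#         --pytorch_dump_folder_path ./sew-hf \
#         --dict_path ./dict.ltr.txt \
#         --is_finetuned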
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Applies `layer` to the inputs in chunks along the flattened batch
    dimensions, trading compute for a lower peak memory footprint.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs the model has been run on.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
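# A minimal sketch of chunk_layer in use (the layer, shapes, and chunk size
# below are assumed, illustrative values, not taken from the module above):
#
#     import torch
#
#     linear = torch.nn.Linear(8, 8)
#     x = torch.randn(4, 16, 8)            # two batch dims: (4, 16)
#     out = chunk_layer(
#         lambda t: linear(t),             # `layer` is called with keyword args
#         {"t": x},
#         chunk_size=8,                    # rows of the flattened (64, 8) batch per call
#         no_batch_dims=2,
#     )
#     assert out.shape == (4, 16, 8)       # same result as linear(x), computed in 8 chunks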
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Splits `code` into blocks at `indent_level`, optionally between `start_prompt` and `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function so comparisons ignore casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sorts a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Returns the same `import_statement` with the imported objects sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sorts `_import_structure` entries in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
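# Illustrative behavior of sort_objects (a hypothetical input, not from the
# file): constants sort first, then classes, then functions, each group
# compared case- and underscore-insensitively.
#
#     sort_objects(["bar_function", "BAZ_CONSTANT", "FooClass"])
#     # -> ["BAZ_CONSTANT", "FooClass", "bar_function"]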
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
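# End-to-end usage of the processor under test (a sketch; the checkpoint and
# file names are illustrative examples, not used by the tests above):
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.open("cat.png")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)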
from math import isqrt


def is_prime(number: int) -> bool:
    """Checks primality by trial division up to isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Counts primes below `max_prime` of the form 3*k**2 + 3*k + 1,
    i.e. the differences of consecutive cubes (k + 1)**3 - k**3."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
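# The candidates enumerated above are the differences of consecutive cubes:
#
#     2**3 - 1**3 = 7,  3**3 - 2**3 = 19,  4**3 - 3**3 = 37,  5**3 - 4**3 = 61, ...
#
# all of which happen to be prime for k = 1..4; solution() counts how many
# such values below the limit are prime.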
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Converts a saved state dict to fp16, overwriting `src_path` unless `save_path` is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
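# Example invocation via fire (the script and file names are placeholders,
# not taken from the file above):
#
#     python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin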
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
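# With the lazy structure above, a statement such as
#
#     from transformers.models.perceiver import PerceiverModel
#
# only triggers the import of `modeling_perceiver` (and hence torch) on first
# attribute access, which keeps plain `import transformers` cheap.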
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
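# Minimal usage sketch (an assumption mirroring other transformers configs;
# the override values are illustrative):
#
#     from transformers import SegformerConfig, SegformerModel
#
#     config = SegformerConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 2, 2])
#     model = SegformerModel(config)  # randomly initialized, MiT-b0-sized backbone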
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_2, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
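# Note: Transfo-XL's `mems` are the cached hidden states that implement its
# segment-level recurrence; the shape checks above expect one tensor of shape
# (mem_len, batch_size, hidden_size) per layer.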
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip('Skip test until #12651 is resolved.')
@slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all the character-level ngrams of a given size from a sentence.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
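    # Illustrative extra (not in the original file): character 3-grams.
    print(create_ngram("example", 3))  # ['exa', 'xam', 'amp', 'mpl', 'ple']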
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """
    Check the substring-divisibility property of Project Euler problem 43: each
    3-digit substring d(i)d(i+1)d(i+2) must be divisible by the i-th prime in
    (2, 3, 5, 7, 11, 13, 17).
    """
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
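# Sanity check against the example from the Project Euler problem statement:
# 1406357289 has substrings 406, 063, 635, 357, 572, 728 and 289, which are
# divisible by 2, 3, 5, 7, 11, 13 and 17 respectively.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))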
def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the property above."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector = vector . vector"""
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
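    # --- illustrative extra (not part of the original module) ---------------
    # A minimal sketch of how the SVC above can be used: fit a linear SVC on
    # four trivially separable 1-D points, then classify two new ones. The toy
    # data and the expected outputs are assumptions chosen for this demo.
    toy_xs = [np.asarray([0.0]), np.asarray([1.0]), np.asarray([2.0]), np.asarray([3.0])]
    toy_ys = np.asarray([-1, -1, 1, 1])
    toy_svc = SVC(kernel="linear", regularization=10.0)
    toy_svc.fit(toy_xs, toy_ys)
    print(toy_svc.predict(np.asarray([2.5])))  # expected: 1
    print(toy_svc.predict(np.asarray([0.5])))  # expected: -1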
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
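# For reference (illustrative only, mirroring the tests above), the pipeline
# under test can be used directly like this:
#
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier(image, candidate_labels=["cat", "plane", "remote"])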
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
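# Worked example of the refinenet renaming above (illustrative): the original
# checkpoints number the fusion blocks 1..4 starting from the deepest feature
# map, while the HF implementation numbers fusion_stage.layers 0..3 the other
# way around, hence abs(layer_idx - 4):
#     refinenet1 -> fusion_stage.layers.3
#     refinenet2 -> fusion_stage.layers.2
#     refinenet3 -> fusion_stage.layers.1
#     refinenet4 -> fusion_stage.layers.0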
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
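# Shape sketch for the split above (illustrative): timm stores the fused QKV
# projection as one (3 * hidden_size, hidden_size) weight matrix; rows
# [0 : hidden_size] hold the query, rows [hidden_size : 2 * hidden_size] the
# key, and the last hidden_size rows the value, matching the three slices
# written into the separate HF query/key/value sub-layers.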
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
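    # Weight norm must be active while copying: with it applied, the HF model
    # exposes `weight_g` / `weight_v` parameters matching the original
    # checkpoint layout; it is removed again at the end of this function to
    # fuse them back into plain `weight` tensors.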
hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
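        # Concrete numbers for the defaults above (illustrative): image_size=30
        # and patch_size=2 give (30 // 2) ** 2 = 225 patches; with
        # mask_ratio=0.6 the model keeps ceil(0.4 * 226) = 91 positions
        # (225 patches + 1 [CLS] token).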
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
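# Illustrative only:
#     get_sub_domain_name("https://sub.example.com/path")  -> "sub.example.com"
#     get_domain_name("https://sub.example.com/path")      -> "example.com"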
def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
lowerCAmelCase__ = emails_from_url('https://github.com')
print(f'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
'''simple docstring'''
def remove_digit(num: int) -> int:
    """
    Return the biggest number that can be obtained by removing exactly one
    digit from the absolute value of the given integer.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
    __import__("doctest").testmod()
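# Illustrative only: remove_digit(152) considers 52, 12 and 15 and returns 52;
# remove_digit(-290) considers 90, 20 and 29 and returns 90.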
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix
    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the prefix of the node and a word"""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
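    # Illustrative only: a node with prefix "myprefix" matched against the word
    # "mystring" yields ("my", "prefix", "string") -- the common part, the rest
    # of the node's prefix, and the rest of the word.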
    def insert_many(self, words: list[str]) -> None:
        """Insert many words in the tree"""
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        """Insert a word into the tree"""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        """Returns if the word is on the tree"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        """Deletes a word from the tree if it exists"""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True
    def print_tree(self, height: int = 0) -> None:
        """Print the tree"""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session')
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
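# Hypothetical consumer of the fixture above (illustrative only): pytest
# injects fixtures by parameter name, so a test can simply do
#
#     def test_dataset_num_rows(dataset):
#         assert dataset.num_rows == 10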
@pytest.fixture(scope='session')
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        '\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>')
with open(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_)
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
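# These constants back the dataset fixtures below: DATA/DATA2 are row-oriented
# splits, DATA_DICT_OF_LISTS is the columnar form, DATA_312 permutes the key
# order, and DATA_STR uses string-valued col_1 entries.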
@pytest.fixture(scope='session')
def __UpperCAmelCase ( ) -> Optional[int]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_)
UpperCamelCase__ : Optional[int] = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
dataset.map(cache_file_name=lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : List[Any] = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(lowerCamelCase_)) as con:
UpperCamelCase__ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values()))
con.commit()
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : List[Any] = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
with open(lowerCamelCase_ , 'w' , newline='') as f:
UpperCamelCase__ : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'])
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_)
return path
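# Sketch of how a test might consume such a fixture (illustrative; assumes the
# fixture is registered under the name csv_path):
#     def test_csv(csv_path):
#         ds = datasets.load_dataset('csv', data_files=csv_path)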
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : List[Any] = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
with open(lowerCamelCase_ , 'w' , newline='') as f:
UpperCamelCase__ : Optional[int] = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'])
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[str]:
import bza
UpperCamelCase__ : int = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb') as f:
UpperCamelCase__ : Any = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase_ , 'wb') as f:
f.write(lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase__ : Optional[Any] = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
        f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV')))
        f.write(lowerCamelCase_ , arcname=os.path.basename(csv2_path.replace('.csv' , '.CSV')))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : Optional[int] = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_)))
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_)))
return path
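# Passing arcname=os.path.join('main_dir', ...) nests the archived files under
# a directory inside the zip, which the *_with_dir fixtures exercise.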
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : List[Any] = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
UpperCamelCase__ : str = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
})
with open(lowerCamelCase_ , 'wb') as f:
UpperCamelCase__ : Dict = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_)
UpperCamelCase__ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_))] for k in DATA[0]} , schema=lowerCamelCase_)
writer.write_table(lowerCamelCase_)
writer.close()
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data') / 'dataset.json')
UpperCamelCase__ : List[str] = {'data': DATA}
with open(lowerCamelCase_ , 'w') as f:
json.dump(lowerCamelCase_ , lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : List[Any] = str(tmp_path_factory.mktemp('data') / 'dataset.json')
UpperCamelCase__ : Dict = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w') as f:
json.dump(lowerCamelCase_ , lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : int = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
with open(lowerCamelCase_ , 'w') as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_) + '\n')
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
with open(lowerCamelCase_ , 'w') as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_) + '\n')
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Tuple = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
with open(lowerCamelCase_ , 'w') as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_) + '\n')
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : str = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
with open(lowerCamelCase_ , 'w') as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_) + '\n')
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple:
import gzip
UpperCamelCase__ : int = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
with open(lowerCamelCase_ , 'rb') as orig_file:
with gzip.open(lowerCamelCase_ , 'wb') as zipped_file:
zipped_file.writelines(lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> int:
import gzip
UpperCamelCase__ : Any = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
with open(lowerCamelCase_ , 'rb') as orig_file:
with gzip.open(lowerCamelCase_ , 'wb') as zipped_file:
zipped_file.writelines(lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : int = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : str = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_)))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase__ : Optional[Any] = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_)))
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_)))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w') as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : int = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w') as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_)))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : List[str] = ['0', '1', '2', '3']
UpperCamelCase__ : Dict = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
with open(lowerCamelCase_ , 'w') as f:
for item in data:
f.write(item + '\n')
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : List[Any] = ['0', '1', '2', '3']
UpperCamelCase__ : Any = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
with open(lowerCamelCase_ , 'w') as f:
for item in data:
f.write(item + '\n')
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : str = ['0', '1', '2', '3']
UpperCamelCase__ : Tuple = tmp_path_factory.mktemp('data') / 'dataset.abc'
with open(lowerCamelCase_ , 'w') as f:
for item in data:
f.write(item + '\n')
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : int = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : Optional[Any] = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_)))
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_)))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]:
UpperCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext'))
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext'))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Optional[int] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
UpperCamelCase__ : str = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as f:
f.write(lowerCamelCase_)
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( ) -> List[Any]:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg')
@pytest.fixture(scope='session')
def __UpperCAmelCase ( ) -> Tuple:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav')
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase__ : Tuple = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w') as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_))
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_).replace('.jpg' , '2.jpg'))
return path
@pytest.fixture(scope='session')
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase__ : Dict = tmp_path_factory.mktemp('data_dir')
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w') as f:
f.write('foo\n' * 10)
with open(data_dir / 'subdir' / 'test.txt' , 'w') as f:
f.write('bar\n' * 10)
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w') as f:
f.write('bar\n' * 10)
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w') as f:
f.write('foo\n' * 10)
with open(data_dir / '.subdir' / 'test.txt' , 'w') as f:
f.write('bar\n' * 10)
return data_dir
| 708 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self):
return 12
@property
    def num_embeds_ada_norm(self):
return 12
@property
    def text_embedder_hidden_size(self):
return 32
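    # The tiny sizes above (num_embed=12, num_embeds_ada_norm=12,
    # text_embedder_hidden_size=32) keep the dummy pipeline small enough for
    # the CPU tests below to run quickly.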
@property
    def dummy_vqvae(self):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
    def dummy_text_encoder(self):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
    def dummy_transformer(self):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
        UpperCamelCase__ : Tuple = Transformer2DModel(**UpperCAmelCase_)
return model
    def test_vq_diffusion(self):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class __lowercase (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs=None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
UpperCamelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
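    # The method above emits the RoBERTa-style sequence-pair layout used by
    # BARThez: <s> A </s></s> B </s>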
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple = None , UpperCAmelCase_ : List[Any] = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a)
if token_ids_a is None:
return [1] + ([0] * len(_a)) + [1]
return [1] + ([0] * len(_a)) + [1, 1] + ([0] * len(_a)) + [1]
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] = None):
UpperCamelCase__ : int = [self.sep_token_id]
UpperCamelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def __UpperCamelCase ( self : int):
return len(self.sp_model)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(_a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str]):
return self.sp_model.encode(_a , out_type=_a)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : str):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ : Optional[int] = self.sp_model.PieceToId(_a)
return spm_id if spm_id else self.unk_token_id
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Optional[int]):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_a)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : str = """"""
UpperCamelCase__ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a) + token
UpperCamelCase__ : Tuple = True
UpperCamelCase__ : Dict = []
else:
current_sub_tokens.append(_a)
UpperCamelCase__ : Optional[Any] = False
out_string += self.sp_model.decode(_a)
return out_string.strip()
def __getstate__( self : Optional[int]):
UpperCamelCase__ : List[Any] = self.__dict__.copy()
UpperCamelCase__ : Union[str, Any] = None
return state
def __setstate__( self : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
UpperCamelCase__ : Tuple = {}
UpperCamelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] = None):
if not os.path.isdir(_a):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : Optional[Any] = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _a)
elif not os.path.isfile(self.vocab_file):
with open(_a , 'wb') as fi:
UpperCamelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a)
return (out_vocab_file,)
| 709 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr , size , stride) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
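# Worked example: a 4x4 input with size=3 and stride=2 yields
# (4 - 3) // 2 + 1 = 1, i.e. a single 1x1 pooled output.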
def avgpooling(arr , size , stride) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty(self):
        return self.head == self.tail
    def push(self , data):
        self.data.append(data)
        self.tail = self.tail + 1
    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self):
        return self.tail - self.head
    def print_queue(self):
        print(self.data)
        print('**************')
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self , data):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data(self):
        return self.data
    def get_left(self):
        return self.left
    def get_right(self):
        return self.right
    def get_height(self):
        return self.height
    def set_data(self , data):
        self.data = data
    def set_left(self , node):
        self.left = node
    def set_right(self , node):
        self.right = node
    def set_height(self , height):
        self.height = height
def get_height(node) -> int:
if node is None:
return 0
return node.get_height()
def my_max(a , b) -> int:
if a > b:
return a
return b
def left_rotation(node) -> MyNode:
    print('left rotation node:' , node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def right_rotation(node) -> MyNode:
    print('right rotation node:' , node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
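# Rebalancing cases handled by insert_node/del_node below:
#   left-left   -> right_rotation    left-right -> lr_rotation
#   right-right -> left_rotation     right-left -> rl_rotation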
def insert_node(node , data) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right() , data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root , data) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child , temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('No such data')
            return root
        else:
            root.set_left(del_node(left_child , data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()) , get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    def __init__(self):
        self.root: MyNode | None = None
    def get_height(self):
        return get_height(self.root)
    def insert(self , data):
        print('insert:' + str(data))
        self.root = insert_node(self.root , data)
    def del_node(self , data):
        print('delete:' + str(data))
        if self.root is None:
            print('Tree is empty!')
            return
        self.root = del_node(self.root , data)
    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ''
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ' ' * int(math.pow(2 , layer - 1))
            output += space
            if node is None:
                output += '*'
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2 , layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 710 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self , rows):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
    def num_rows(self):
return len(self.rows)
@property
    def num_columns(self):
return len(self.rows[0])
@property
    def order(self):
return (self.num_rows, self.num_columns)
@property
    def is_square(self):
return self.order[0] == self.order[1]
    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
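    # For orders above 2x2, the determinant above is computed by Laplace
    # (cofactor) expansion along the first row.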
    def is_invertable(self):
return bool(self.determinant())
    def get_minor(self , row , column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self , row , column):
        if (row + column) % 2 == 0:
            return self.get_minor(row , column)
        return -1 * self.get_minor(row , column)
    def minors(self):
        return Matrix(
            [
                [self.get_minor(row , column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)
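    # Classical adjugate formula: A^(-1) = adj(A) / det(A).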
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
    def add_row(self , row , position=None):
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row , list):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self , column , position=None):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column , list):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__( self , other: object):
        if not isinstance(other , Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , other: object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
    def __add__( self , other: Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
    def __sub__( self , other: Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
    def __mul__( self , other: Matrix | int | float):
        if isinstance(other , (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other , Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row , column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')
    def __pow__( self , other: int):
        if not isinstance(other , int):
            raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
        result = self
for _ in range(other - 1):
result *= self
return result
@classmethod
    def dot_product(cls , row , column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def convert_classification(base_model_name , hf_config , downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization(base_model_name , hf_config , downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector(base_model_name , hf_config , downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name , config_path , checkpoint_path , model_dump_path):
    checkpoint = torch.load(checkpoint_path , map_location='cpu')
    downstream_dict = checkpoint['Downstream']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict)
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
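    # Example invocation (script name and paths are illustrative):
    #   python convert_s3prl_checkpoint.py --base_model_name microsoft/unispeech-sat-base \
    #       --config_path config.json --checkpoint_path downstream.ckpt --model_dump_path ./converted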
| 711 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
        UpperCamelCase__ : Dict = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
        UpperCamelCase__ : List[str] = UNet2DConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
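    # The dict above is a tiny stand-in for the IF text-to-image components;
    # the super-resolution variant below adds an image-noising scheduler.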
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
        UpperCamelCase__ : List[Any] = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
        UpperCamelCase__ : Any = UNet2DConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
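# Note: `to_np` used in the comparisons above is assumed to be the shared test
# helper that converts torch tensors to numpy arrays; a minimal sketch of one:
import torch

def to_np(tensor):
    # only convert actual torch tensors; pass numpy arrays through unchanged
    if isinstance(tensor, torch.Tensor):
        tensor = tensor.detach().cpu().numpy()
    return tensor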
| 6 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowercase (_A ):
_lowerCamelCase = ''''''
_lowerCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowerCamelCase = None # compression type in fsspec. ex: "gzip"
_lowerCamelCase = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : List[str] , UpperCAmelCase_ : int = "" , UpperCAmelCase_ : Any = None , UpperCAmelCase_ : Tuple = None , **UpperCAmelCase_ : List[Any]):
super().__init__(self , **UpperCAmelCase_)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCamelCase__ : str = fsspec.open(
UpperCAmelCase_ , mode='rb' , protocol=UpperCAmelCase_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
UpperCamelCase__ : Any = os.path.basename(self.file.path.split('::')[0])
UpperCamelCase__ : List[Any] = (
self.compressed_name[: self.compressed_name.rindex('.')]
if "." in self.compressed_name
else self.compressed_name
)
UpperCamelCase__ : Optional[int] = None
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , UpperCAmelCase_ : Tuple):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(UpperCAmelCase_).lstrip('/')
def __UpperCamelCase ( self : List[str]):
if self.dir_cache is None:
UpperCamelCase__ : Any = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
UpperCamelCase__ : List[str] = {f["name"]: f}
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Optional[int]):
return self.file.open().read()
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] = "rb" , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : List[str] , ):
UpperCamelCase__ : List[str] = self._strip_protocol(UpperCAmelCase_)
if mode != "rb":
raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'')
return self.file.open()
class __lowercase (_A ):
_lowerCamelCase = '''bz2'''
_lowerCamelCase = '''bz2'''
_lowerCamelCase = '''.bz2'''
class __lowercase (_A ):
_lowerCamelCase = '''gzip'''
_lowerCamelCase = '''gzip'''
_lowerCamelCase = '''.gz'''
class __lowercase (_A ):
_lowerCamelCase = '''lz4'''
_lowerCamelCase = '''lz4'''
_lowerCamelCase = '''.lz4'''
class __lowercase (_A ):
_lowerCamelCase = '''xz'''
_lowerCamelCase = '''xz'''
_lowerCamelCase = '''.xz'''
class __lowercase (_A ):
_lowerCamelCase = '''zstd'''
_lowerCamelCase = '''zstd'''
_lowerCamelCase = '''.zst'''
def __init__( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] = "rb" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Dict = None , UpperCAmelCase_ : List[str] = DEFAULT_BLOCK_SIZE , **UpperCAmelCase_ : Any , ):
super().__init__(
fo=UpperCAmelCase_ , mode=UpperCAmelCase_ , target_protocol=UpperCAmelCase_ , target_options=UpperCAmelCase_ , block_size=UpperCAmelCase_ , **UpperCAmelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCamelCase__ : Optional[Any] = self.file.__enter__
class __lowercase :
def __init__( self : Any , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = file_
def __enter__( self : Union[str, Any]):
self._file.__enter__()
return self
def __exit__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any]):
self._file.__exit__(*UpperCAmelCase_ , **UpperCAmelCase_)
def __iter__( self : Tuple):
return iter(self._file)
def __UpperCamelCase ( self : Tuple):
return next(self._file)
def __getattr__( self : List[str] , UpperCAmelCase_ : Optional[Any]):
return getattr(self._file , UpperCAmelCase_)
def fixed_enter(*UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
return WrappedFile(_enter(*UpperCAmelCase_ , **UpperCAmelCase_))
UpperCamelCase__ : List[str] = fixed_enter
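# Usage sketch for the chained-protocol URLs these filesystems enable. The class
# name and URL below are placeholders; this assumes the gzip filesystem defined
# above has been registered with fsspec under its protocol name:
import fsspec

# fsspec.register_implementation('gzip', GzipFileSystem)  # hypothetical class name
with fsspec.open('gzip://file.txt::https://example.com/file.txt.gz', mode='rt') as f:
    print(f.read())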
| 712 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
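# Hedged illustration (not part of the original module): how the keys produced
# above are used in textbook ElGamal. With public key (key_size, e1, e2, p),
# where e2 = (e1**d)^-1 mod p, and private key (key_size, d), a message m < p
# round-trips as follows:
import random

def elgamal_encrypt(m: int, e1: int, e2: int, p: int) -> tuple[int, int]:
    k = random.randrange(2, p)  # fresh ephemeral key per message
    return pow(e1, k, p), (m * pow(e2, k, p)) % p

def elgamal_decrypt(c1: int, c2: int, d: int, p: int) -> int:
    # c2 * c1^d = m * e1^(-d*k) * e1^(d*k) = m (mod p)
    return (c2 * pow(c1, d, p)) % p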
| 6 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowercase :
@staticmethod
def __UpperCamelCase ( *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any]):
pass
@is_pipeline_test
@require_vision
class __lowercase (unittest.TestCase ):
@require_torch
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Tuple = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
UpperCamelCase__ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
UpperCamelCase__ : Optional[Any] = image_classifier(__A , candidate_labels=['a', 'b', 'c'])
# The floating-point scores are so close that they fall within floating-point
# error, so the order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__A) , [
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}],
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}],
] , )
UpperCamelCase__ : int = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(__A) , [
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
] , )
@require_tf
def __UpperCamelCase ( self : str):
UpperCamelCase__ : int = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
UpperCamelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
UpperCamelCase__ : Union[str, Any] = image_classifier(__A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(__A) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , )
UpperCamelCase__ : str = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(__A) , [
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
[
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
{'score': 0.3_33, 'label': ANY(__A)},
],
] , )
@slow
@require_torch
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : str = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
UpperCamelCase__ : Optional[int] = image_classifier(__A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(__A) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
UpperCamelCase__ : str = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(__A) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Optional[int] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
UpperCamelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
UpperCamelCase__ : List[str] = image_classifier(__A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(__A) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
UpperCamelCase__ : str = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(__A) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
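# Minimal standalone usage of the pipeline exercised above (model id taken from
# the slow tests; requires transformers, a torch or TF backend, and Pillow):
from PIL import Image
from transformers import pipeline

classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
print(classifier(image, candidate_labels=['cat', 'plane', 'remote']))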
| 713 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
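# Example invocation of this conversion script (script name and paths are
# placeholders):
# python convert_unispeech.py \
#     --checkpoint_path /path/to/unispeech.pt \
#     --pytorch_dump_folder_path ./unispeech-converted \
#     --config_path ./config.json \
#     --dict_path /path/to/dict.ltr.txt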
| 6 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowercase (UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__A , 'hidden_sizes'))
self.parent.assertTrue(hasattr(__A , 'neck_hidden_sizes'))
self.parent.assertTrue(hasattr(__A , 'num_attention_heads'))
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Tuple=640 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Optional[int]="silu" , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Optional[int] = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : str = image_size
UpperCamelCase__ : List[str] = patch_size
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : Optional[Any] = last_hidden_size
UpperCamelCase__ : int = num_attention_heads
UpperCamelCase__ : Optional[Any] = hidden_act
UpperCamelCase__ : Union[str, Any] = conv_kernel_size
UpperCamelCase__ : List[str] = output_stride
UpperCamelCase__ : List[str] = hidden_dropout_prob
UpperCamelCase__ : int = attention_probs_dropout_prob
UpperCamelCase__ : int = classifier_dropout_prob
UpperCamelCase__ : Tuple = use_labels
UpperCamelCase__ : Any = is_training
UpperCamelCase__ : Union[str, Any] = num_labels
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : Optional[Any] = scope
def __UpperCamelCase ( self : str):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : int = None
UpperCamelCase__ : Tuple = None
if self.use_labels:
UpperCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels)
UpperCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
UpperCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self : Tuple):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
UpperCamelCase__ : List[Any] = MobileViTModel(config=__A)
model.to(__A)
model.eval()
UpperCamelCase__ : Any = model(__A)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Tuple = self.num_labels
UpperCamelCase__ : List[Any] = MobileViTForImageClassification(__A)
model.to(__A)
model.eval()
UpperCamelCase__ : str = model(__A , labels=__A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : Optional[int] = self.num_labels
UpperCamelCase__ : Any = MobileViTForSemanticSegmentation(__A)
model.to(__A)
model.eval()
UpperCamelCase__ : List[str] = model(__A)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCamelCase__ : Optional[Any] = model(__A , labels=__A)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = config_and_inputs
UpperCamelCase__ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowerCamelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = MobileViTModelTester(self)
UpperCamelCase__ : Tuple = MobileViTConfigTester(self , config_class=__A , has_text_modality=__A)
def __UpperCamelCase ( self : List[str]):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings')
def __UpperCamelCase ( self : Dict):
pass
@unittest.skip(reason='MobileViT does not output attentions')
def __UpperCamelCase ( self : Dict):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(__A)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : str = [*signature.parameters.keys()]
UpperCamelCase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __A)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : str):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A)
def __UpperCamelCase ( self : Any):
def check_hidden_states_output(UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = model_class(__A)
model.to(__A)
model.eval()
with torch.no_grad():
UpperCamelCase__ : int = model(**self._prepare_for_class(__A , __A))
UpperCamelCase__ : Union[str, Any] = outputs.hidden_states
UpperCamelCase__ : int = 5
self.assertEqual(len(__A) , __A)
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCamelCase__ : Tuple = 2
for i in range(len(__A)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2)
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = True
check_hidden_states_output(__A , __A , __A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : Tuple = True
check_hidden_states_output(__A , __A , __A)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A)
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A)
@slow
def __UpperCamelCase ( self : List[Any]):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Optional[Any] = MobileViTModel.from_pretrained(__A)
self.assertIsNotNone(__A)
def __UpperCAmelCase ( ) -> Dict:
UpperCamelCase__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Optional[int]):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(__A)
UpperCamelCase__ : Union[str, Any] = self.default_image_processor
UpperCamelCase__ : List[Any] = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=__A , return_tensors='pt').to(__A)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[int] = model(**__A)
# verify the logits
UpperCamelCase__ : Union[str, Any] = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , __A)
UpperCamelCase__ : Optional[int] = torch.tensor([-1.93_64, -1.23_27, -0.46_53]).to(__A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4))
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Dict = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
UpperCamelCase__ : Any = model.to(__A)
UpperCamelCase__ : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
UpperCamelCase__ : str = prepare_img()
UpperCamelCase__ : Dict = image_processor(images=__A , return_tensors='pt').to(__A)
# forward pass
with torch.no_grad():
UpperCamelCase__ : str = model(**__A)
UpperCamelCase__ : List[Any] = outputs.logits
# verify the logits
UpperCamelCase__ : Dict = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape , __A)
UpperCamelCase__ : int = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.98_68, -9.71_32], [-11.0_405, -11.0_221, -10.7_318]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
] , device=__A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4))
@slow
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
UpperCamelCase__ : int = model.to(__A)
UpperCamelCase__ : List[str] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
UpperCamelCase__ : Union[str, Any] = prepare_img()
UpperCamelCase__ : Any = image_processor(images=__A , return_tensors='pt').to(__A)
# forward pass
with torch.no_grad():
UpperCamelCase__ : int = model(**__A)
UpperCamelCase__ : Any = outputs.logits.detach().cpu()
UpperCamelCase__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__A , target_sizes=[(50, 60)])
UpperCamelCase__ : Optional[int] = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape , __A)
UpperCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__A)
UpperCamelCase__ : Any = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape , __A)
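# Standalone sketch of the classification path verified above (model id taken
# from the integration test; the image path is a placeholder):
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small')
model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small')
inputs = processor(images=Image.open('cat.png'), return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])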
| 714 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# assemble the pipeline around the DDIM scheduler defined above
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
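# Sketch of calling the safety-guided pipeline directly with the 'strong'
# settings the tests exercise (model id from the tests; a CUDA device is
# assumed for practical runtimes):
import torch
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained('runwayml/stable-diffusion-v1-5').to('cuda')
generator = torch.manual_seed(0)
image = pipe(
    'a photograph of an astronaut riding a horse', generator=generator,
    num_inference_steps=50, sld_guidance_scale=2_000, sld_warmup_steps=7,
    sld_threshold=0.0_25, sld_momentum_scale=0.5, sld_mom_beta=0.7,
).images[0]
image.save('safe.png')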
| 6 | 0 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = '''a lovely <dicoo> in a red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
lowerCAmelCase__ = '''path-to-your-trained-model'''
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 64, 64)
lowerCAmelCase__ = torch.rand(1) * 999
lowerCAmelCase__ = torch.randn(2, 77, 768)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 666
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
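# Example invocation (script name is a placeholder; needs a CPU build of
# intel_extension_for_pytorch and a trained model at `path-to-your-trained-model`):
# python sd_inference_ipex.py --dpm --steps 20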
| 715 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
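# Illustration: for the word tuple ('h', 'e', 'l', 'l', 'o'), get_pairs returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}; the BPE loop below repeatedly
# merges the lowest-ranked of these pairs until no ranked pair remains.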
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to space-prefix the user turn, as Blenderbot does internally
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
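# Hedged illustration of the greedy BPE loop the `bpe` method above implements:
# repeatedly merge the adjacent pair with the lowest merge rank until no known
# merge applies. `bpe_sketch` and the toy ranks are hypothetical, for
# demonstration only; they are not the tokenizer's shipped merge table.
def bpe_sketch(token, ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        # Pick the best-ranked pair; stop if none of the pairs is a known merge.
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

assert bpe_sketch("low", {("l", "o"): 0, ("lo", "w"): 1}) == "low"
assert bpe_sketch("low", {("l", "o"): 0}) == "lo w"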
| 6 | 0 |
'''simple docstring'''
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase__ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __UpperCAmelCase ( lowerCamelCase_) -> str:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase__ : Any = terminalreporter.config.getoption('--make-reports')
if make_reports:
pytest_terminal_summary_main(lowerCamelCase_ , id=lowerCamelCase_)
| 716 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
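# Hedged defensive variant: Yahoo's CSS class names change frequently, so a
# robust scraper should tolerate a missing node instead of raising
# AttributeError. The class string is taken verbatim from the snippet above
# and may well be stale; `stock_price_safe` is a hypothetical name.
import requests
from bs4 import BeautifulSoup

def stock_price_safe(symbol="AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div else None
    return span.text if span else "N/A"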
| 6 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase (__lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = BlenderbotSmallTokenizer
_lowerCamelCase = False
def __UpperCamelCase ( self : List[str]):
super().setUp()
UpperCamelCase__ : Union[str, Any] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
UpperCamelCase__ : List[Any] = dict(zip(__A , range(len(__A))))
UpperCamelCase__ : Dict = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
UpperCamelCase__ : str = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(__A) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(__A))
def __UpperCamelCase ( self : str , **UpperCAmelCase_ : Optional[Any]):
kwargs.update(self.special_tokens_map)
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__A)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[str]):
UpperCamelCase__ : Tuple = 'adapt act apte'
UpperCamelCase__ : Tuple = 'adapt act apte'
return input_text, output_text
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Optional[Any] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
UpperCamelCase__ : int = 'adapt act apte'
UpperCamelCase__ : str = ['adapt', 'act', 'ap@@', 'te']
UpperCamelCase__ : Optional[int] = tokenizer.tokenize(__A)
self.assertListEqual(__A , __A)
UpperCamelCase__ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCamelCase__ : List[Any] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A) , __A)
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : int = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
assert tok('sam').input_ids == [1_384]
UpperCamelCase__ : Union[str, Any] = 'I am a small frog.'
UpperCamelCase__ : Tuple = tok([src_text] , padding=__A , truncation=__A)['input_ids']
UpperCamelCase__ : int = tok.batch_decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A)[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Tuple = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')
UpperCamelCase__ : Dict = 'I am a small frog .'
UpperCamelCase__ : str = '.'
UpperCamelCase__ : Optional[int] = tok(__A)['input_ids']
UpperCamelCase__ : List[Any] = tok(__A)['input_ids']
assert encoded[-1] == encoded_dot[0]
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class __lowercase (__snake_case ):
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any]):
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , _lowercase , )
super().__init__(*_lowercase , **_lowercase)
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
            # append 48 zeroed integers to extend the message schedule to 64 words
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
                    # fill in the zeroed entries at the end of the message schedule
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
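# Hedged sanity sketch for the preprocessing step above: SHA-256 pads the
# message with 0x80, zero bytes, and a 64-bit big-endian bit length so the
# total is a multiple of 64 bytes. Self-contained restatement with a
# hypothetical name (`_pad`), since the class above is obfuscated.
import struct as _struct

def _pad(data):
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + _struct.pack(">Q", len(data) * 8)

assert len(_pad(b"abc")) % 64 == 0
assert len(_pad(b"x" * 55)) == 64  # 55 data + 1 marker + 8 length bytes = one block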
| 6 | 0 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
def wrapper(*lowerCamelCase_ , **lowerCamelCase_):
UpperCamelCase__ : List[Any] = timeit.default_timer()
UpperCamelCase__ : List[str] = func(*lowercase_ , **lowercase_)
UpperCamelCase__ : Any = timeit.default_timer() - starttime
return delta
UpperCamelCase__ : List[str] = func.__name__
return wrapper
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=100 , lowerCamelCase_=None) -> str:
UpperCamelCase__ : Dict = []
UpperCamelCase__ : List[Any] = seq_shapes or {}
for i in range(lowercase_):
UpperCamelCase__ : Any = {}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(lowercase_ , _ArrayXD):
UpperCamelCase__ : Union[str, Any] = np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(lowercase_ , datasets.Value):
if v.dtype == "string":
UpperCamelCase__ : Optional[Any] = """The small grey turtle was surprisingly fast when challenged."""
else:
UpperCamelCase__ : List[str] = np.random.randint(10 , size=1).astype(v.dtype).item()
elif isinstance(lowercase_ , datasets.Sequence):
while isinstance(lowercase_ , datasets.Sequence):
UpperCamelCase__ : Optional[Any] = v.feature
UpperCamelCase__ : str = seq_shapes[k]
UpperCamelCase__ : Dict = np.random.rand(*lowercase_).astype(v.dtype)
UpperCamelCase__ : List[Any] = data
dummy_data.append((i, example))
return dummy_data
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=100 , lowerCamelCase_=None) -> str:
UpperCamelCase__ : Union[str, Any] = generate_examples(lowercase_ , num_examples=lowercase_ , seq_shapes=lowercase_)
with ArrowWriter(features=lowercase_ , path=lowercase_) as writer:
for key, record in dummy_data:
UpperCamelCase__ : List[Any] = features.encode_example(lowercase_)
writer.write(lowercase_)
UpperCamelCase__ : List[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.')
UpperCamelCase__ : Optional[Any] = datasets.Dataset.from_file(filename=lowercase_ , info=datasets.DatasetInfo(features=lowercase_))
    return dataset
| 719 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
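# Hedged numeric check of the formula above, V_bi = (kT/q) * ln(Nd * Na / ni^2),
# using typical silicon values (Nd = Na = 1e17 cm^-3, ni = 1e10 cm^-3). The
# concrete concentrations are illustrative assumptions, not from the snippet.
from math import log
from scipy.constants import Boltzmann, physical_constants

kT_over_q = Boltzmann * 300 / physical_constants["electron volt"][0]  # ~0.02585 V
v_bi = kT_over_q * log((1e17 * 1e17) / 1e10**2)
print(f"Built-in potential ≈ {v_bi:.3f} V")  # ≈ 0.833 V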
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __lowercase :
_lowerCamelCase = 42
_lowerCamelCase = None
_lowerCamelCase = None
def __UpperCAmelCase ( ) -> Node | None:
UpperCamelCase__ : Union[str, Any] = Node(1)
UpperCamelCase__ : str = Node(2)
UpperCamelCase__ : List[str] = Node(3)
UpperCamelCase__ : Optional[int] = Node(4)
UpperCamelCase__ : Tuple = Node(5)
return tree
def __UpperCAmelCase ( lowerCamelCase_) -> list[int]:
return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def __UpperCAmelCase ( lowerCamelCase_) -> list[int]:
return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def __UpperCAmelCase ( lowerCamelCase_) -> list[int]:
return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def __UpperCAmelCase ( lowerCamelCase_) -> int:
return (max(height(root.left) , height(root.right)) + 1) if root else 0
def __UpperCAmelCase ( lowerCamelCase_) -> Sequence[Node | None]:
UpperCamelCase__ : Optional[Any] = []
if root is None:
return output
UpperCamelCase__ : int = deque([root])
while process_queue:
UpperCamelCase__ : Optional[int] = process_queue.popleft()
output.append(node.data)
if node.left:
process_queue.append(node.left)
if node.right:
process_queue.append(node.right)
return output
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Sequence[Node | None]:
UpperCamelCase__ : int = []
def populate_output(lowerCamelCase_ , lowerCamelCase_) -> None:
if not root:
return
if level == 1:
output.append(root.data)
elif level > 1:
populate_output(root.left , level - 1)
populate_output(root.right , level - 1)
populate_output(lowerCAmelCase__ , lowerCAmelCase__)
return output
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Sequence[Node | None]:
UpperCamelCase__ : Union[str, Any] = []
def populate_output(lowerCamelCase_ , lowerCamelCase_) -> None:
if root is None:
return
if level == 1:
output.append(root.data)
elif level > 1:
populate_output(root.right , level - 1)
populate_output(root.left , level - 1)
populate_output(lowerCAmelCase__ , lowerCAmelCase__)
return output
def __UpperCAmelCase ( lowerCamelCase_) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : List[Any] = height(lowerCAmelCase__)
for h in range(1 , height_tree + 1):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCAmelCase__ , lowerCAmelCase__))
UpperCamelCase__ : Union[str, Any] = 1
else:
output.append(get_nodes_from_right_to_left(lowerCAmelCase__ , lowerCAmelCase__))
UpperCamelCase__ : Union[str, Any] = 0
return output
def __UpperCAmelCase ( ) -> None: # Main function for testing.
UpperCamelCase__ : int = make_tree()
print(f'In-order Traversal: {inorder(lowerCAmelCase__)}')
print(f'Pre-order Traversal: {preorder(lowerCAmelCase__)}')
print(f'Post-order Traversal: {postorder(lowerCAmelCase__)}' , '\n')
print(f'Height of Tree: {height(lowerCAmelCase__)}' , '\n')
print('Complete Level Order Traversal: ')
print(level_order(lowerCAmelCase__) , '\n')
print('Level-wise order Traversal: ')
for level in range(1 , height(lowerCAmelCase__) + 1):
print(f'Level {level}:' , get_nodes_from_left_to_right(lowerCAmelCase__ , level=lowerCAmelCase__))
print('\nZigZag order Traversal: ')
print(zigzag(lowerCAmelCase__))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 720 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
            # No cached data yet, so the chunk size must be computed from scratch
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
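# Hedged toy illustration of the idea behind `chunk_layer` above: apply a layer
# to flattened-batch slices of size `chunk_size` and concatenate the results,
# trading speed for peak memory. `chunked_apply` is a hypothetical name, not
# this module's API.
import torch

def chunked_apply(layer, x, chunk_size):
    outs = [layer(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(outs, dim=0)

_lin = torch.nn.Linear(4, 4)
_x = torch.randn(10, 4)
assert torch.allclose(chunked_apply(_lin, _x, 3), _lin(_x), atol=1e-6)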
| 6 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = """Hello, World!"""
lowerCAmelCase__ = """en_XX"""
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__ : Any = Path('data_bin')
UpperCamelCase__ : int = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCamelCase).parent) , checkpoint_file=Path(_lowerCamelCase).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(_lowerCamelCase) , bpe='sentencepiece' , sentencepiece_model=str(Path(_lowerCamelCase).parent / 'sentencepiece.bpe.model') , src_dict=str(data_dir / 'dict.txt') , )
xmod.eval() # disable dropout
print(_lowerCamelCase)
UpperCamelCase__ : Dict = xmod.model.encoder.sentence_encoder
UpperCamelCase__ : str = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCamelCase__ : Tuple = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , _lowerCamelCase)
UpperCamelCase__ : List[str] = XmodForSequenceClassification(_lowerCamelCase) if classification_head else XmodForMaskedLM(_lowerCamelCase)
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCamelCase__ : str = xmod_sent_encoder.embed_tokens.weight
UpperCamelCase__ : int = xmod_sent_encoder.embed_positions.weight
UpperCamelCase__ : List[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
UpperCamelCase__ : Any = xmod_sent_encoder.layernorm_embedding.weight
UpperCamelCase__ : Tuple = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
UpperCamelCase__ : Any = model.roberta.encoder.layer[i]
UpperCamelCase__ : List[Any] = xmod_sent_encoder.layers[i]
# self attention
UpperCamelCase__ : Any = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('Dimensions of self-attention weights do not match.')
UpperCamelCase__ : List[Any] = xmod_layer.self_attn.q_proj.weight
UpperCamelCase__ : Tuple = xmod_layer.self_attn.q_proj.bias
UpperCamelCase__ : str = xmod_layer.self_attn.k_proj.weight
UpperCamelCase__ : Optional[Any] = xmod_layer.self_attn.k_proj.bias
UpperCamelCase__ : Tuple = xmod_layer.self_attn.v_proj.weight
UpperCamelCase__ : List[str] = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCamelCase__ : List[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.')
UpperCamelCase__ : Optional[Any] = xmod_layer.self_attn.out_proj.weight
UpperCamelCase__ : int = xmod_layer.self_attn.out_proj.bias
UpperCamelCase__ : Any = xmod_layer.self_attn_layer_norm.weight
UpperCamelCase__ : List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCamelCase__ : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.')
UpperCamelCase__ : int = xmod_layer.fca.weight
UpperCamelCase__ : List[str] = xmod_layer.fca.bias
# output
UpperCamelCase__ : int = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.')
UpperCamelCase__ : Optional[Any] = xmod_layer.fca.weight
UpperCamelCase__ : int = xmod_layer.fca.bias
UpperCamelCase__ : Union[str, Any] = xmod_layer.final_layer_norm.weight
UpperCamelCase__ : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCamelCase__ : str = xmod_layer.adapter_layer_norm.weight
UpperCamelCase__ : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('Lists of language adapters do not match.')
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCamelCase__ : Optional[int] = bert_output.adapter_modules[lang_code]
UpperCamelCase__ : Optional[int] = xmod_layer.adapter_modules[lang_code]
UpperCamelCase__ : Union[str, Any] = from_adapter.fca.weight
UpperCamelCase__ : Tuple = from_adapter.fca.bias
UpperCamelCase__ : Optional[int] = from_adapter.fca.weight
UpperCamelCase__ : Dict = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCamelCase__ : Any = xmod_sent_encoder.layer_norm.weight
UpperCamelCase__ : Optional[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCamelCase__ : Optional[Any] = xmod.model.classification_heads['mnli'].dense.weight
UpperCamelCase__ : List[str] = xmod.model.classification_heads['mnli'].dense.bias
UpperCamelCase__ : Any = xmod.model.classification_heads['mnli'].out_proj.weight
UpperCamelCase__ : Any = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
UpperCamelCase__ : List[str] = xmod.model.encoder.lm_head.dense.weight
UpperCamelCase__ : Tuple = xmod.model.encoder.lm_head.dense.bias
UpperCamelCase__ : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCamelCase__ : Dict = xmod.model.encoder.lm_head.layer_norm.bias
UpperCamelCase__ : List[str] = xmod.model.encoder.lm_head.weight
UpperCamelCase__ : Optional[int] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCamelCase__ : Tuple = xmod.encode(_lowerCamelCase).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(_lowerCamelCase)
UpperCamelCase__ : Any = model(_lowerCamelCase)[0]
if classification_head:
UpperCamelCase__ : str = xmod.model.classification_heads['mnli'](xmod.extract_features(_lowerCamelCase))
else:
UpperCamelCase__ : Union[str, Any] = xmod.model(_lowerCamelCase , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
UpperCamelCase__ : List[Any] = torch.max(torch.abs(our_output - their_output)).item()
print(f'max_absolute_diff = {max_absolute_diff}') # ~ 1e-7
UpperCamelCase__ : Dict = torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3)
print('Do both models output the same tensors?' , '🔥' if success else '💩')
if not success:
raise Exception('Something went wRoNg')
Path(_lowerCamelCase).mkdir(parents=_lowerCamelCase , exist_ok=_lowerCamelCase)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 721 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str: # noqa: E741
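    # Binary search: return the smallest index r in (l, r] with v[r] >= key.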
while r - l > 1:
UpperCamelCase__ : List[str] = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ : Dict = m
else:
UpperCamelCase__ : Any = m # noqa: E741
return r
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
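    # O(n log n) longest increasing subsequence via patience-style tails.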
if len(lowerCamelCase_) == 0:
return 0
UpperCamelCase__ : Optional[int] = [0] * len(lowerCamelCase_)
UpperCamelCase__ : Dict = 1
UpperCamelCase__ : Dict = v[0]
for i in range(1 , len(lowerCamelCase_)):
if v[i] < tail[0]:
UpperCamelCase__ : Tuple = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ : Dict = v[i]
length += 1
else:
UpperCamelCase__ : Optional[int] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
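# Hedged worked example of the O(n log n) method above: `tail[k]` holds the
# smallest possible tail of an increasing subsequence of length k + 1. For
# v = [2, 5, 3, 7, 11, 8, 10, 13, 6] the tails end as [2, 3, 6, 8, 10, 13],
# giving length 6 (e.g. 2, 3, 7, 8, 10, 13). Equivalent bisect-based sketch:
import bisect

def lis_length_sketch(v):
    tails = []
    for x in v:
        i = bisect.bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

assert lis_length_sketch([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6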
| 700 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_)
for k, v in tqdm(state_dict.items()):
if not isinstance(lowerCamelCase_ , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
UpperCamelCase__ : int = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase__ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
fire.Fire(convert)
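# Hedged programmatic sketch of what the CLI above does: load a state dict,
# halve every tensor, and save it back. The file names in the commented call
# are hypothetical:
#   convert("pytorch_model.bin", map_location="cpu", save_path="model_fp16.bin")
import torch

_sd = {"w": torch.randn(2, 2)}
_fp16 = {k: v.half() for k, v in _sd.items()}
assert _fp16["w"].dtype == torch.float16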
| 6 | 0 |
'''simple docstring'''
from itertools import permutations
def __UpperCAmelCase ( lowerCamelCase_) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
UpperCamelCase__ : Any = [7, 11, 13, 17]
for i, test in enumerate(SCREAMING_SNAKE_CASE_):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __UpperCAmelCase ( lowerCamelCase_ = 10) -> int:
return sum(
int(''.join(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)))
for num in permutations(range(SCREAMING_SNAKE_CASE_))
if is_substring_divisible(SCREAMING_SNAKE_CASE_))
if __name__ == "__main__":
print(f'''{solution() = }''')
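# Hedged sanity check of the divisibility test above: 1406357289 is the classic
# 0-9 pandigital whose 3-digit substrings d2d3d4 .. d8d9d10 are divisible by
# 2, 3, 5, 7, 11, 13, 17 respectively (Project Euler 43).
_digits = tuple(int(c) for c in "1406357289")
_tests = [2, 3, 5, 7, 11, 13, 17]
assert all(
    (_digits[i + 1] * 100 + _digits[i + 2] * 10 + _digits[i + 3]) % p == 0
    for i, p in enumerate(_tests)
)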
| 701 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_encoder_blocks
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : int = sr_ratios
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : List[str] = patch_sizes
UpperCamelCase__ : Optional[int] = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = classifier_dropout_prob
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Dict = decoder_hidden_size
UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : List[str] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
| 6 | 0 |
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step , n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
    if res == -1:
        print('Number not found!')
    else:
        print(f'''Number {x} is at index {res}''')
| 702 |
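# Sanity check for jump_search above: the input list must already be sorted,
# since the algorithm jumps ahead in sqrt(n)-sized blocks and then scans
# linearly within one block.
assert jump_search([0, 1, 2, 3, 4, 10, 55] , 55) == 6
assert jump_search([0, 1, 2, 3, 4, 10, 55] , 7) == -1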
'''simple docstring'''
def create_ngram(sentence: str , ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 0 |
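# Example of the n-gram helper above on a short string: every window of
# length ngram_size is returned, in order.
assert create_ngram('abcde' , 3) == ['abc', 'bcd', 'cde']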
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig (PretrainedConfig ):
    model_type = '''segformer'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage' , True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig (OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation( self):
        return 1e-4
    @property
    def default_onnx_opset( self):
        return 12
| 703 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector , vector)
class SVC:
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma , (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear( self , vectora: ndarray , vectorb: ndarray) -> float:
        return np.dot(vectora , vectorb)
    def __rbf( self , vectora: ndarray , vectorb: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit( self , observations: list , classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes , 0 , 0)
        l_bounds = Bounds(0 , self.regularization)
        l_star = minimize(
            to_minimize , np.ones(n) , bounds=l_bounds , constraints=[ly_contraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j])
        self.offset = s / n
    def predict( self , observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 6 | 0 |
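# Toy usage of the SVC class above on two linearly separable observations.
# A sketch only: scipy's minimize solves the 2-sample Wolfe dual here, so the
# exact optimizer output may vary slightly by scipy version.
import numpy as np
xs = [np.asarray([1.0, 1.0]), np.asarray([-1.0, -1.0])]
ys = np.asarray([1, -1])
svc = SVC(kernel='linear' , regularization=10)
svc.fit(observations=xs , classes=ys)
assert svc.predict(np.asarray([2.0, 2.0])) == 1
assert svc.predict(np.asarray([-2.0, -2.0])) == -1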
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset')) , 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict) -> None:
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key(name) -> str:
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection')
    if "blocks" in name:
        name = name.replace('blocks' , 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head')
    if "scratch" in name:
        name = name.replace('scratch' , 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt')
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm')
    if "head" in name:
        name = name.replace('head' , 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head')
    return name
def read_in_q_k_v(state_dict , config) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name) -> None:
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 0 |
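# Illustration of the rename_key chain above on one timm-style key: the
# substring rules fire in order, mapping it into the HF DPT layout.
assert rename_key('pretrained.model.blocks.0.attn.proj.weight') == 'dpt.encoder.layer.0.attention.output.dense.weight'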
'''simple docstring'''
def bubble_sort(list_data: list , length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
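# Usage of the recursive bubble sort above; each recursive call shrinks the
# unsorted prefix by one, for a worst case of O(n^2) comparisons.
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([-23, 0, 6, -4, 34]) == [-23, -4, 0, 6, 34]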
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 0 |
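# Worked example of the sequence-length arithmetic used by the model tester
# above: with image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225
# patches, and with mask_ratio=0.6 the encoder keeps
# ceil((1 - 0.6) * (225 + 1)) = 91 tokens (the +1 is the [CLS] token).
import math
assert (30 // 2) ** 2 == 225
assert int(math.ceil((1 - 0.6) * (225 + 1))) == 91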
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig (PretrainedConfig ):
    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 706 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=DummyObject ):
    _backends = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowerCAmelCase__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig (PretrainedConfig ):
    model_type = '''owlvit_text_model'''
    def __init__( self , vocab_size=49_408 , hidden_size=512 , intermediate_size=2_048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49_406 , eos_token_id=49_407 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type') == "owlvit":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict , **kwargs)
class OwlViTVisionConfig (PretrainedConfig ):
    model_type = '''owlvit_vision_model'''
    def __init__( self , hidden_size=768 , intermediate_size=3_072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type') == "owlvit":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict , **kwargs)
class OwlViTConfig (PretrainedConfig ):
    model_type = '''owlvit'''
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.65_92 , return_dict=True , **kwargs , ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.')
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict , **kwargs)
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs):
        config_dict = {}
        config_dict['text_config'] = text_config
        config_dict['vision_config'] = vision_config
        return cls.from_dict(config_dict , **kwargs)
    def to_dict( self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class OwlViTOnnxConfig (OnnxConfig ):
    @property
    def inputs( self):
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('attention_mask', {0: 'batch', 1: 'sequence'}),
            ])
    @property
    def outputs( self):
        return OrderedDict(
            [
                ('logits_per_image', {0: 'batch'}),
                ('logits_per_text', {0: 'batch'}),
                ('text_embeds', {0: 'batch'}),
                ('image_embeds', {0: 'batch'}),
            ])
    @property
    def atol_for_validation( self):
        return 1e-4
    def generate_dummy_inputs( self , processor , batch_size: int = -1 , seq_length: int = -1 , framework=None , ):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework)
        return {**text_input_dict, **image_input_dict}
    @property
    def default_onnx_opset( self):
        return 14
| 707 |
'''simple docstring'''
class RadixNode:
    def __init__( self , prefix: str = "" , is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word: str):
        x = 0
        for q, w in zip(self.prefix , word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words: list[str]):
        for word in words:
            self.insert(word)
    def insert( self , word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find( self , word: str):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete( self , word: str):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height: int = 0):
        if self.prefix != "":
            print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find('bandanas')
    assert not root.find('apps')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)
    print('Words:' , words)
    print('Tree:')
    root.print_tree()
if __name__ == "__main__":
main()
| 6 | 0 |
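# Usage note for the radix tree above: edges store whole prefix strings, so a
# lookup costs at most O(len(word)) character comparisons regardless of how
# many words share a prefix.
tree = RadixNode()
tree.insert_many('banana bananas bandana band'.split())
assert tree.find('banana')
assert not tree.find('ban')  # 'ban' is an internal split node, not a stored word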
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position , value , l_send , r_send , lr_cv , rr_cv , result_pipe) -> None:
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr) -> list:
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1 , len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ) , ))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main() -> None:
    arr = list(range(10 , 0 , -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
| 708 |
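# A quick sanity check for the module above. Each element gets its own OS
# process, so this demonstrates the parallel odd-even transposition idea
# rather than a practical sort; the fixed 10 passes in oe_process assume
# lists of length <= 10, and multiprocessing requires the __main__ guard on
# spawn-based platforms.
if __name__ == "__main__":
    assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]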
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
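# Note on the checks above: the slow test compares a full 256x256 sample against a
# reference array pulled from the hub with a loose 2.0 max-abs bound, while the
# fast tests pin 3x3 corner slices of the 24x24 dummy outputs to hard-coded
# expected values with a 1e-2 max-abs tolerance.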
| 6 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = 'train'
    dev = 'dev'
    test = 'test'


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py',
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}',
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start
                )
            else:
                logger.info(f'Creating features from dataset file at {args.data_dir}')

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]'
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
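# A minimal usage sketch (the path and tokenizer below are placeholders, and the
# GLUE data must already be on disk):
#   args = GlueDataTrainingArguments(task_name='mrpc', data_dir='/path/to/MRPC')
#   dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)
#   len(dataset)  # number of cached InputFeatures
#   dataset[0]    # the first InputFeatures example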
| 709 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
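    # A quick numeric check of the two helpers above (values are illustrative):
    # a 4x4 input pooled with size=2 and stride=2 yields a 2x2 output.
    sample = np.arange(1, 17).reshape(4, 4)
    print(maxpooling(sample, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(sample, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]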
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
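    # A short fixed-input check of the recursion above (values are illustrative):
    demo = [5, 3, 1, 4, 2]
    rec_insertion_sort(demo, len(demo))
    print(demo)  # -> [1, 2, 3, 4, 5]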
| 710 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.'
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n  ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
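    # A small worked example (entries chosen for illustration only):
    example = Matrix([[1, 2], [3, 4]])
    print(example.determinant())    # 1*4 - 2*3 == -2
    print(example.is_invertable())  # True, since the determinant is non-zero
    print(repr(example * 2))        # [[2, 4], [6, 8]] -- entry-wise scaling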
| 6 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Visual-Attention-Network/van-base': (
        'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
    ),
}


class VanConfig(PretrainedConfig):
    model_type = 'van'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
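# A minimal instantiation sketch (the override values are illustrative; anything
# not passed falls back to the defaults above):
#   config = VanConfig(image_size=384, depths=[3, 3, 12, 3])
#   config.hidden_sizes  # -> [64, 128, 320, 512]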
| 711 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
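# Both round-trip checks above follow the same recipe: run the pipeline, save it
# with save_pretrained, reload it with from_pretrained (re-setting
# AttnAddedKVProcessor so the attention math stays reproducible), rerun with the
# same seeded generator, and require the max absolute difference to stay below 1e-4.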
| 6 | 0 |
'''simple docstring'''
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
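# Sketch of reading one field from a response (keys follow the OpenWeatherMap
# JSON schema, and a valid appid must be set above for real requests):
#   current_weather('Kolkata')['main']['temp']  # temperature, in Kelvin by default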
| 712 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
    public_key, private_key = generate_key(key_size)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def main() -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
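# A toy walk-through of generate_key's arithmetic (numbers far too small for real
# use): with p = 23, g = 5 and private d = 6, pow(5, 6, 23) == 8, and the modular
# inverse of 8 mod 23 is 3 (8 * 3 == 24 == 1 mod 23). The public key then carries
# (key_size, 5, 3, 23) and the private key carries (key_size, 6).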
| 6 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
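# With the registration above, importing this package stays cheap: _LazyModule
# defers loading modeling_xmod (and hence torch) until one of the listed names,
# e.g. XmodModel, is first accessed.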
| 713 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
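# A sketch of invoking the converter (the script name and all paths below are
# placeholders; only the flags defined above exist):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --config_path ./config.json \
#       --not_finetuned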
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
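# The three slow tests above exercise Safe Latent Diffusion's knobs:
# sld_guidance_scale=0 disables safety guidance entirely, while a large value
# (2_000 here) combined with sld_warmup_steps, sld_threshold, sld_momentum_scale
# and sld_mom_beta steers the same seeded generation toward a safer output.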
| 6 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class __lowercase (__lowerCamelCase ):
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str]):
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 715 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
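# A minimal sanity sketch for the two helpers above: bytes_to_unicode builds a
# reversible byte -> printable-unicode map, and get_pairs lists the adjacent symbol
# pairs that the BPE merge ranks below are looked up against:
#
#     byte_encoder = bytes_to_unicode()
#     assert len(byte_encoder) == 256            # every byte value gets a printable char
#     assert get_pairs(tuple('low')) == {('l', 'o'), ('o', 'w')}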
class BlenderbotTokenizer(PreTrainedTokenizer):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done inside BlenderBot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
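# A hedged round-trip sketch (checkpoint name assumed from the map above; requires
# network access on first use). Byte-level BPE is lossless, so decoding recovers the
# input text up to the added special tokens:
#
#     from transformers import BlenderbotTokenizer
#     tok = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
#     ids = tok('Hello world')['input_ids']                # BPE ids plus a trailing </s>
#     assert tok.decode(ids, skip_special_tokens=True).strip() == 'Hello world'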
| 6 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})
    # Add non-duplicated keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])
    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC , encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8') as f:
                f.write(yaml.dump(content , allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
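# Illustrative behaviour of clean_model_doc_toc on hypothetical entries: duplicated
# 'local' keys with a single title collapse to one entry, and the result is sorted
# by lower-cased title:
#
#     toc = [{'local': 'model_doc/bert', 'title': 'BERT'},
#            {'local': 'model_doc/bert', 'title': 'BERT'},
#            {'local': 'model_doc/gpt2', 'title': 'GPT-2'}]
#     clean_model_doc_toc(toc)
#     # -> [{'local': 'model_doc/bert', 'title': 'BERT'},
#     #     {'local': 'model_doc/gpt2', 'title': 'GPT-2'}]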
| 716 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text , 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
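# Offline sketch of the same parsing step on a static snippet, so the selector can be
# tested without a network call. The CSS class mirrors the one hard-coded above and is
# an assumption; Yahoo may change it whenever the page is redesigned:
#
#     html = '<div class="My(6px) Pos(r) smartphone_Mt(6px)"><span>172.99</span></div>'
#     soup = BeautifulSoup(html, 'html.parser')
#     assert soup.find('div', class_='My(6px) Pos(r) smartphone_Mt(6px)').find('span').text == '172.99'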
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCAmelCase__ = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
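# The _LazyModule indirection above keeps `import transformers` cheap: the torch-backed
# classes are only imported when first accessed. A hedged usage sketch (assumes a
# working torch install; otherwise the OptionalDependencyNotAvailable path applies):
#
#     from transformers.models.ernie import ErnieConfig   # config only, no torch import
#     from transformers.models.ernie import ErnieModel    # this line triggers the torch import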
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowercase (SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = (DEISMultistepScheduler,)
_lowerCamelCase = (('''num_inference_steps''', 25),)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = {
"num_train_timesteps": 1_000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**snake_case__)
return config
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple=0 , **UpperCAmelCase_ : Tuple):
UpperCamelCase__ : Tuple = dict(self.forward_default_kwargs)
UpperCamelCase__ : str = kwargs.pop('num_inference_steps' , snake_case__)
UpperCamelCase__ : Optional[Any] = self.dummy_sample
UpperCamelCase__ : Tuple = 0.1 * sample
UpperCamelCase__ : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : List[Any] = self.get_scheduler_config(**snake_case__)
UpperCamelCase__ : List[Any] = scheduler_class(**snake_case__)
scheduler.set_timesteps(snake_case__)
# copy over dummy past residuals
UpperCamelCase__ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__)
UpperCamelCase__ : Union[str, Any] = scheduler_class.from_pretrained(snake_case__)
new_scheduler.set_timesteps(snake_case__)
# copy over dummy past residuals
UpperCamelCase__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
                UpperCamelCase__, UpperCamelCase__ : Optional[Any] = sample, sample
for t in range(snake_case__ , time_step + scheduler.config.solver_order + 1):
UpperCamelCase__ : str = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__).prev_sample
UpperCamelCase__ : Union[str, Any] = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : str):
pass
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : List[str]):
UpperCamelCase__ : Dict = dict(self.forward_default_kwargs)
UpperCamelCase__ : Dict = kwargs.pop('num_inference_steps' , snake_case__)
UpperCamelCase__ : str = self.dummy_sample
UpperCamelCase__ : Tuple = 0.1 * sample
UpperCamelCase__ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : List[Any] = self.get_scheduler_config()
UpperCamelCase__ : Dict = scheduler_class(**snake_case__)
scheduler.set_timesteps(snake_case__)
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__)
UpperCamelCase__ : Tuple = scheduler_class.from_pretrained(snake_case__)
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__)
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase__ : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__).prev_sample
UpperCamelCase__ : Union[str, Any] = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Union[str, Any]):
if scheduler is None:
UpperCamelCase__ : str = self.scheduler_classes[0]
UpperCamelCase__ : int = self.get_scheduler_config(**snake_case__)
UpperCamelCase__ : Dict = scheduler_class(**snake_case__)
UpperCamelCase__ : int = self.scheduler_classes[0]
UpperCamelCase__ : List[Any] = self.get_scheduler_config(**snake_case__)
UpperCamelCase__ : List[Any] = scheduler_class(**snake_case__)
UpperCamelCase__ : Any = 10
UpperCamelCase__ : Any = self.dummy_model()
UpperCamelCase__ : int = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__)
for i, t in enumerate(scheduler.timesteps):
UpperCamelCase__ : List[str] = model(snake_case__ , snake_case__)
UpperCamelCase__ : int = scheduler.step(snake_case__ , snake_case__ , snake_case__).prev_sample
return sample
def __UpperCamelCase ( self : str):
UpperCamelCase__ : int = dict(self.forward_default_kwargs)
UpperCamelCase__ : int = kwargs.pop('num_inference_steps' , snake_case__)
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Dict = self.get_scheduler_config()
UpperCamelCase__ : Optional[int] = scheduler_class(**snake_case__)
UpperCamelCase__ : Any = self.dummy_sample
UpperCamelCase__ : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , 'set_timesteps'):
scheduler.set_timesteps(snake_case__)
elif num_inference_steps is not None and not hasattr(snake_case__ , 'set_timesteps'):
UpperCamelCase__ : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCamelCase__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
UpperCamelCase__ : int = scheduler.timesteps[5]
UpperCamelCase__ : int = scheduler.timesteps[6]
UpperCamelCase__ : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__).prev_sample
UpperCamelCase__ : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config())
UpperCamelCase__ : Any = self.full_loop(scheduler=snake_case__)
UpperCamelCase__ : Dict = torch.mean(torch.abs(snake_case__))
assert abs(result_mean.item() - 0.2_39_16) < 1e-3
UpperCamelCase__ : Tuple = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCamelCase__ : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCamelCase__ : Optional[int] = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCamelCase__ : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config)
UpperCamelCase__ : Dict = self.full_loop(scheduler=snake_case__)
UpperCamelCase__ : List[Any] = torch.mean(torch.abs(snake_case__))
assert abs(result_mean.item() - 0.2_39_16) < 1e-3
def __UpperCamelCase ( self : List[str]):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__)
def __UpperCamelCase ( self : List[Any]):
self.check_over_configs(thresholding=snake_case__)
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , algorithm_type='deis' , solver_order=snake_case__ , solver_type=snake_case__ , )
def __UpperCamelCase ( self : Any):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__)
def __UpperCamelCase ( self : str):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
UpperCamelCase__ : int = self.full_loop(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
assert not torch.isnan(snake_case__).any(), "Samples have nan numbers"
def __UpperCamelCase ( self : Any):
self.check_over_configs(lower_order_final=snake_case__)
self.check_over_configs(lower_order_final=snake_case__)
def __UpperCamelCase ( self : Tuple):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=snake_case__ , time_step=0)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : str = self.full_loop()
UpperCamelCase__ : Union[str, Any] = torch.mean(torch.abs(snake_case__))
assert abs(result_mean.item() - 0.2_39_16) < 1e-3
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Tuple = self.full_loop(prediction_type='v_prediction')
UpperCamelCase__ : List[Any] = torch.mean(torch.abs(snake_case__))
assert abs(result_mean.item() - 0.0_91) < 1e-3
def __UpperCamelCase ( self : str):
UpperCamelCase__ : str = self.scheduler_classes[0]
UpperCamelCase__ : Tuple = self.get_scheduler_config(thresholding=snake_case__ , dynamic_thresholding_ratio=0)
UpperCamelCase__ : Optional[int] = scheduler_class(**snake_case__)
UpperCamelCase__ : Tuple = 10
UpperCamelCase__ : Dict = self.dummy_model()
UpperCamelCase__ : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case__)
for i, t in enumerate(scheduler.timesteps):
UpperCamelCase__ : Union[str, Any] = model(snake_case__ , snake_case__)
UpperCamelCase__ : Tuple = scheduler.step(snake_case__ , snake_case__ , snake_case__).prev_sample
assert sample.dtype == torch.floataa
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
    def __init__( self : Tuple , data : bytes):
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
        # Initialize round constants
        self.round_constants = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data : bytes):
        padding = B'\x80' + (B'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash( self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data) , 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block))
            # append 48 zeroed integers to hold the rest of the message schedule
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] , 7)
                        ^ self.ror(words[index - 15] , 18)
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17)
                        ^ self.ror(words[index - 2] , 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0X100_000_000
# Compression
                sa = self.ror(e , 6) ^ self.ror(e , 11) ^ self.ror(e , 25)
                ch = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0X100_000_000
                sa = self.ror(a , 2) ^ self.ror(a , 13) ^ self.ror(a , 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sa + maj) % 0X100_000_000
                h, g, f, e, d, c, b, a = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
                    ((tempa + tempb) % 0X100_000_000),
)
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X100_000_000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror( self , value : int , rotations : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
    def test_match_hashes( self ):
import hashlib
        msg = bytes('Test String' , 'utf-8')
        self.assertEqual(SHAaaa(msg).hash , hashlib.sha256(msg).hexdigest())
def main() -> None:
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8')
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
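# Worked example of the preprocessing step above: for the 11-byte message
# b'Test String', padding adds the 0x80 marker plus 44 zero bytes so that
# 11 + 1 + 44 + 8 == 64 (exactly one block), followed by the bit length
# 11 * 8 == 88 packed as a big-endian 8-byte integer:
#
#     padded = SHAaaa.preprocessing(b'Test String')
#     assert len(padded) == 64 and padded[11] == 0x80
#     assert padded[-8:] == (88).to_bytes(8, 'big')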
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
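# Example run: the pivot/recursion above returns the longest non-decreasing
# subsequence, e.g.
#
#     assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]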
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 719 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
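# Worked example (the concentrations only need to share a unit, e.g. cm^-3, since
# only their ratio enters the log): for N_D = N_A = 1e17 and n_i = 1.5e10 at T = 300 K,
# V_bi = (kT/q) * ln(N_D * N_A / n_i**2) ~= 0.0259 * ln(4.44e13) ~= 0.81 V.
#
#     v = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
#     assert abs(v - 0.81) < 0.01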
| 6 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowercase (unittest.TestCase ):
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[Any] = self.dummy_uncond_unet
UpperCamelCase__ : Optional[int] = KarrasVeScheduler()
UpperCamelCase__ : Optional[int] = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_)
pipe.to(snake_case_)
pipe.set_progress_bar_config(disable=snake_case_)
UpperCamelCase__ : Dict = torch.manual_seed(0)
UpperCamelCase__ : Dict = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy').images
UpperCamelCase__ : Union[str, Any] = torch.manual_seed(0)
UpperCamelCase__ : List[str] = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy' , return_dict=snake_case_)[0]
UpperCamelCase__ : int = image[0, -3:, -3:, -1]
UpperCamelCase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : Union[str, Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = "google/ncsnpp-celebahq-256"
UpperCamelCase__ : Tuple = UNetaDModel.from_pretrained(snake_case_)
UpperCamelCase__ : Tuple = KarrasVeScheduler()
UpperCamelCase__ : Optional[int] = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_)
pipe.to(snake_case_)
pipe.set_progress_bar_config(disable=snake_case_)
UpperCamelCase__ : Union[str, Any] = torch.manual_seed(0)
UpperCamelCase__ : List[str] = pipe(num_inference_steps=20 , generator=snake_case_ , output_type='numpy').images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase__ : Dict = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 720 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
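# Worked example of the unravelling above (using _flat_idx_to_idx, the name
# _chunk_slice below calls it by): flat index 5 with batch dims (2, 3) gives
# 5 % 3 == 2 and 5 // 3 == 1, so the reversed remainders yield (1, 2):
#
#     assert _flat_idx_to_idx(5, (2, 3)) == (1, 2)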
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
        UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
            UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
    UpperCamelCase__ : int = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
                UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
                UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
        UpperCamelCase__ : tuple = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
# Otherwise, we can reuse the precomputed value
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
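# A hedged usage sketch of the chunking entry point, assuming it is exposed as
# chunk_layer (its name in the OpenFold-derived original). The layer runs on
# chunk_size slabs of the flattened batch dims and the pieces are stitched back:
#
#     import torch
#     layer = lambda x: {'y': x * 2}
#     x = torch.randn(4, 8, 16)                      # two batch dims: (4, 8)
#     out = chunk_layer(layer, {'x': x}, chunk_size=8, no_batch_dims=2)
#     assert torch.equal(out['y'], x * 2)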
| 6 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __lowercase (unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Any=5 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : str=10 , UpperCAmelCase_ : Dict=0.02 , ):
UpperCamelCase__ : Optional[Any] = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : List[str] = image_size
UpperCamelCase__ : Dict = patch_size
UpperCamelCase__ : int = num_channels
UpperCamelCase__ : Any = is_training
UpperCamelCase__ : Optional[Any] = use_labels
UpperCamelCase__ : Any = hidden_size
UpperCamelCase__ : int = num_hidden_layers
UpperCamelCase__ : Optional[int] = num_attention_heads
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : Dict = hidden_act
UpperCamelCase__ : List[str] = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : Optional[int] = type_sequence_label_size
UpperCamelCase__ : str = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ : int = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = num_patches + 1
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : Tuple = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]):
UpperCamelCase__ : Optional[Any] = FlaxViTModel(config=UpperCAmelCase__)
UpperCamelCase__ : Any = model(UpperCAmelCase__)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ : List[Any] = (self.image_size, self.image_size)
UpperCamelCase__ : Tuple = (self.patch_size, self.patch_size)
UpperCamelCase__ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str):
UpperCamelCase__ : Union[str, Any] = self.type_sequence_label_size
UpperCamelCase__ : Dict = FlaxViTForImageClassification(config=UpperCAmelCase__)
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
UpperCamelCase__ : Tuple = 1
UpperCamelCase__ : Any = FlaxViTForImageClassification(UpperCAmelCase__)
UpperCamelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Optional[Any] = model(UpperCAmelCase__)
def __UpperCamelCase ( self : List[str]):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class __lowercase (lowercase__ , unittest.TestCase ):
_lowerCamelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[Any] = FlaxViTModelTester(self)
UpperCamelCase__ : List[str] = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def __UpperCamelCase ( self : Dict):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = model_class(UpperCAmelCase__)
UpperCamelCase__ : Tuple = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Tuple = [*signature.parameters.keys()]
UpperCamelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCamelCase__ : Dict = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
UpperCamelCase__ : Union[str, Any] = model_class(UpperCAmelCase__)
@jax.jit
def model_jitted(UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict):
return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__)
with self.subTest('JIT Enabled'):
UpperCamelCase__ : Dict = model_jitted(**UpperCAmelCase__).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
UpperCamelCase__ : Tuple = model_jitted(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__))
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def __UpperCamelCase ( self : Tuple):
for model_class_name in self.all_model_classes:
UpperCamelCase__ : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224')
UpperCamelCase__ : Tuple = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(UpperCAmelCase__)
| 721 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor)
def __UpperCamelCase ( self : Dict):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np')
        input_processor = processor(images=image_input , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def __UpperCamelCase ( self : Dict):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
def __UpperCamelCase ( self : str):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
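    # Usage sketch mirroring the checks above (illustrative only, would run inside the test class):
    #
    #   processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    #   out = processor(text='lower newer', images=self.prepare_image_inputs())
    #   assert set(out.keys()) == {'input_ids', 'attention_mask', 'pixel_values'}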
| 6 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
lowerCAmelCase__ = '▁'
class __lowercase (__UpperCAmelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["input_ids", "attention_mask"]
_lowerCamelCase = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def __UpperCamelCase ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def __UpperCamelCase ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def __UpperCamelCase ( self , save_directory : str , filename_prefix : Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
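# Usage sketch, assuming the upstream public name BarthezTokenizerFast for the class
# above (this file obfuscates it) and network access to the checkpoints in the map:
#
#   tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/barthez')
#   ids = tokenizer('Le camembert est délicieux')['input_ids']
#   # a single sequence is wrapped as <s> ... </s> by the special-tokens method above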
| 700 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path , map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v , torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path)
if __name__ == "__main__":
fire.Fire(convert)
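# Usage sketch via python-fire (the script filename here is hypothetical):
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Leaving --save_path unset overwrites the source checkpoint in place with
# half-precision tensors.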
| 6 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCAmelCase__ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase__ = 'MobileNetV1Config'
# Base docstring
lowerCAmelCase__ = 'google/mobilenet_v1_1.0_224'
lowerCAmelCase__ = [1, 1024, 7, 7]
# Image classification docstring
lowerCAmelCase__ = 'google/mobilenet_v1_1.0_224'
lowerCAmelCase__ = 'tabby, tabby cat'
lowerCAmelCase__ = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + 'weights'] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + 'BatchNorm/beta'] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + 'BatchNorm/gamma'] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
        tf_to_pt_map[prefix + 'depthwise_weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
        tf_to_pt_map[prefix + 'weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + 'weights'] = model.classifier.weight
        tf_to_pt_map[prefix + 'biases'] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.')
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}')
        array = tf.train.load_variable(tf_checkpoint_path , name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f'Importing {name}')
        if name not in tf_weights:
            logger.info(f'{name} not in tf pre-trained weights, skipping')
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise')
            array = np.transpose(array , (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing')
            if len(pointer.shape) == 2: # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched')
        logger.info(f'Initialize PyTorch weight {name} {array.shape}')
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name , None)
        tf_weights.pop(name + '/RMSProp' , None)
        tf_weights.pop(name + '/RMSProp_1' , None)
        tf_weights.pop(name + '/ExponentialMovingAverage' , None)
    logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}')
    return model
def apply_tf_padding( features , conv_layer):
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0)
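# Worked example of the TF "SAME" padding computed above (illustrative numbers):
# for a 7x7 input with stride 2 and kernel 3, 7 % 2 = 1, so pad_along = max(3 - 1, 0) = 2
# per axis, split as (top, bottom) = (1, 1); the padded 9x9 map convolves down to the
# 4x4 output TensorFlow would produce, i.e. ceil(7 / 2) = 4 per side.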
class MobileNetVaConvLayer (nn.Module ):
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.')
        if out_channels % groups != 0:
            raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.')
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='zeros' , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.99_97 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def __UpperCamelCase ( self , features):
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
return features
class MobileNetVaPreTrainedModel (PreTrainedModel ):
_lowerCamelCase = MobileNetVaConfig
_lowerCamelCase = load_tf_weights_in_mobilenet_va
_lowerCamelCase = '''mobilenet_v1'''
_lowerCamelCase = '''pixel_values'''
_lowerCamelCase = False
    def __UpperCamelCase ( self , module):
        if isinstance(module , (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
lowerCAmelCase__ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase__ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , lowerCamelCase__ , )
class MobileNetVaModel (MobileNetVaPreTrainedModel ):
    def __init__( self , config , add_pooling_layer = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier) , config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier) , config.min_depth)
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ))
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
raise NotImplementedError
@add_start_docstrings_to_model_forward(__lowerCamelCase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def __UpperCamelCase ( self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state) , start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCamelCase__ , )
class MobileNetVaForImageClassification (MobileNetVaPreTrainedModel ):
    def __init__( self , config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True)
        self.classifier = nn.Linear(last_hidden_size , config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(__lowerCamelCase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def __UpperCamelCase ( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze())
                else:
                    loss = loss_fct(logits , labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
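# Minimal inference sketch for the classification head above (the checkpoint name and
# AutoImageProcessor come from the upstream transformers library; `image` is any PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224')
#   model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224')
#   inputs = processor(images=image, return_tensors='pt')
#   predicted_class = model(**inputs).logits.argmax(-1).item()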
| 701 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage' , True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
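# Quick sanity sketch (the public name SegformerConfig is assumed from the upstream
# library; this file obfuscates the class name):
#
#   config = SegformerConfig(depths=[3, 4, 6, 3])   # any default above can be overridden
#   assert config.num_encoder_blocks == 4 and config.decoder_hidden_size == 256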
| 6 | 0 |
'''simple docstring'''
def rank_of_matrix( matrix) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows , columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
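# Hand-checked example: in the matrix below the third row is the sum of the first two,
# so only two rows are independent and the expected rank is 2.
#
#   matrix = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [5.0, 7.0, 9.0]]
#   assert rank_of_matrix(matrix) == 2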
| 702 |
'''simple docstring'''
def create_ngram( sentence: str , ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
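# Example (character n-grams, not word n-grams): create_ngram("dog", 2) -> ['do', 'og'],
# since there are len("dog") - 2 + 1 = 2 starting positions.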
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class __lowercase (snake_case__ ):
_lowerCamelCase = '''data2vec-vision'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase (snake_case__ ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[int]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : int):
return 1e-4
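# Usage sketch (the public name Data2VecVisionConfig is assumed from the upstream
# library; this file obfuscates the class name):
#
#   config = Data2VecVisionConfig(image_size=384)   # any default above can be overridden
#   assert config.out_indices == [3, 5, 7, 11]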
| 703 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector) -> float:
    return np.dot(vector , vector)
class __lowercase :
    def __init__( self , *,
    regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma , (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear( self , vectora: ndarray , vectorb: ndarray):
        return np.dot(vectora , vectorb)
    def __rbf( self , vectora: ndarray , vectorb: ndarray):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit( self , observations: list[ndarray] , classes: ndarray):
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate : ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes , 0 , 0)
        l_bounds = Bounds(0 , self.regularization)
        l_star = minimize(
            to_minimize , np.ones(n) , bounds=l_bounds , constraints=[ly_contraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j])
        self.offset = s / n
    def predict( self , observation: ndarray):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
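# Toy usage sketch for the class above (labels must be +/-1; points chosen to be
# linearly separable, so the predicted label for x=0 is illustrative but expected):
#
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svm = __lowercase(kernel="linear")
#   svm.fit(observations=xs, classes=ys)
#   svm.predict(np.asarray([0.0, 1.5]))   # -> 1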
| 6 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase__ = "http://www.mocksite.com/file1.txt"
lowerCAmelCase__ = "\"text\": [\"foo\", \"foo\"]"
lowerCAmelCase__ = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __lowercase :
_lowerCamelCase = 200
_lowerCamelCase = {'''Content-Length''': '''100'''}
_lowerCamelCase = {}
    def iter_content( self , **kwargs):
        return [bytes(CONTENT , 'utf-8')]
def mock_request( *args , **kwargs):
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict])
def __UpperCAmelCase ( monkeypatch , urls_type , tmp_path):
import requests
    monkeypatch.setattr(requests , 'request' , mock_request)
    url = URL
    if issubclass(urls_type , str):
        urls = url
    elif issubclass(urls_type , list):
        urls = [url]
    elif issubclass(urls_type , dict):
        urls = {'train': url}
    dataset_name = 'dummy'
    cache_subdir = 'downloads'
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('.json')
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict])
def __UpperCAmelCase ( paths_type , xz_file , text_file):
    filename = str(xz_file)
    if issubclass(paths_type , str):
        paths = filename
    elif issubclass(paths_type , list):
        paths = [filename]
    elif issubclass(paths_type , dict):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path , file):
    assert path.endswith('.jsonl')
    for num_items, line in enumerate(file , start=1):
        item = json.loads(line.decode('utf-8'))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'])
def __UpperCAmelCase ( archive_jsonl , request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path) , start=1):
        _test_jsonl(path , file)
    assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'])
def __UpperCAmelCase ( archive_nested_jsonl , request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path) , start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file) , start=1):
            _test_jsonl(subpath , subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def __UpperCAmelCase ( data_dir):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir) , start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 704 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset')) , 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key( name):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def read_in_q_k_v( state_dict , config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
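# Example invocation (the script filename is hypothetical; the checkpoint URL is the
# default defined above):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large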
| 6 | 0 |
'''simple docstring'''
def is_even( number: int) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
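# Example: the lowest bit decides parity, so is_even(4) is True and is_even(7) is False.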
| 705 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
def __UpperCamelCase ( self : Dict):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def __UpperCamelCase ( self , config , pixel_values , labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def __UpperCamelCase ( self , config , pixel_values , labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
    def test_determinism(self):
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 6 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowerCAmelCase__ = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
lowerCAmelCase__ = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
lowerCAmelCase__ = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float'),
'references': datasets.Value('float'),
}) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 706 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ['torch', 'scipy']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'scipy'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])
| 6 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(F'Column {self.audio_column} is not present in features.')
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(F'Column {self.audio_column} is not an Audio type.')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 707 |
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str) -> tuple[str, str, str]:
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
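    # Worked example (sketch): matching 'banana' against a node whose prefix is
    # 'band' yields ('ban', 'd', 'ana') -- the common part, the leftover prefix,
    # and the leftover word.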
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print('-' * height, self.prefix, ' (leaf)' if self.is_leaf else '')
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find('bandanas')
    assert not root.find('apps')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)

    print('Words:', words)
    print('Tree:')
    root.print_tree()


if __name__ == "__main__":
    main()
| 6 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
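# Worked example (sketch): for a torch.nn.Linear(10, 5) with requires_grad left
# at the default, this counts 10 * 5 weight entries + 5 biases = 55 parameters.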
logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.')

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'val_{metric}', mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f'val_{metric}', mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = F'{key}: {val:.6f}\n'
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 708 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
@property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 6 | 0 |
'''simple docstring'''
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total
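# Worked example: sum_of_series(1, 1, 10) = (10 / 2) * (2 * 1 + 9 * 1) = 55.0,
# i.e. the sum 1 + 2 + ... + 10.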
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
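# Worked example (sketch): pooling a 4x4 matrix with size=2, stride=2 yields a
# (4 - 2) // 2 + 1 = 2 x 2 result:
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#   -> [[ 6.,  8.], [14., 16.]]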
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
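# Worked example (sketch): the same 4x4 matrix averaged over 2x2 blocks (block
# averages are truncated to int by the implementation):
#   avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#   -> [[ 3.,  5.], [11., 13.]]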
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string')}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', F'{builder.name}-train.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', F'{builder.name}-train-00000-of-00002.arrow')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', F'{builder.name}-train-00001-of-00002.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', F'{builder.name}-train.arrow')))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
| 710 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])
@property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
    def __sub__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')
    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
        result = self
for _ in range(other - 1):
result *= self
return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
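# Worked example (sketch): Matrix([[1, 2], [3, 4]]) has determinant
# 1 * 4 - 2 * 3 = -2, and adding the matrix to itself doubles every entry:
#   Matrix([[1, 2], [3, 4]]) + Matrix([[1, 2], [3, 4]]) == Matrix([[2, 4], [6, 8]])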
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
    parser.add_argument(
        '--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument(
        '--max_length', type=int, default=5, help='The maximum total input sequence length after tokenization.', )
    parser.add_argument(
        '--num_beams', type=int, default=None, help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ), )
    parser.add_argument(
        '--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True, )
    parser.add_argument(
        '--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name', )
    parser.add_argument(
        '--device', type=str, default='cpu', help='Device where the model will be run', )
    parser.add_argument('--output_file_path', type=str, default=None, help='Where to store the final ONNX file.')

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors='pt').to(model.device)

        summary_ids = model.generate(
            inputs['input_ids'], attention_mask=inputs['attention_mask'], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'], output_names=['output_ids'], dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            }, example_outputs=summary_ids, )

        logger.info('Model exported to {}'.format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 711 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', class_embed_type='timestep', mid_block_scale_factor=1.414, time_embedding_act_fn='gelu', time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_schedule='squaredcos_cap_v2', beta_start=0.0001, beta_end=0.02, )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        if 'image' in inputs:
            image = inputs['image']
        else:
            image = None
        if 'mask_image' in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if 'original_image' in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, F'`{optional_component}` did not stay set to None after loading.', )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
| 6 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=_UpperCamelCase ):
_lowerCamelCase = ['''note_seq''']
def __init__( self : str , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int]):
requires_backends(self , ['note_seq'])
@classmethod
def __UpperCamelCase ( cls : Any , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
requires_backends(cls , ['note_seq'])
@classmethod
def __UpperCamelCase ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any):
requires_backends(cls , ['note_seq'])
| 712 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
    UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private key -> has to be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
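# Hedged round-trip sketch (added for illustration; not part of the original
# module). The public key stores e_2 = (e_1 ** d) ** -1 (mod p), so a
# ciphertext (c_1, c_2) with c_1 = e_1**k % p and c_2 = m * e_2**k % p
# decrypts as m = c_2 * c_1**d % p. All numbers below are toy values.
def _demo_elgamal_round_trip() -> None:
    p = 467  # toy prime (real keys come from rabin_miller.generate_large_prime)
    e_1 = 2  # stands in for primitive_root(p)
    d = 123  # private exponent
    e_2 = pow(pow(e_1, d, p), -1, p)  # same value find_mod_inverse returns above
    m, k = 42, 77  # message block and ephemeral key
    c_1 = pow(e_1, k, p)
    c_2 = (m * pow(e_2, k, p)) % p
    assert (c_2 * pow(c_1, d, p)) % p == m  # decryption recovers the message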
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
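# Hedged illustration (added; not part of the original script): how the '*'
# placeholder in MAPPING is resolved. The layer index is recovered from the
# fairseq parameter name and substituted into the mapped pattern, mirroring
# the name.split(key)[0].split('.')[-2] logic above.
def _demo_star_substitution() -> None:
    name = 'encoder.layers.3.fc1.weight'
    mapped_key = 'encoder.layers.*.feed_forward.intermediate_dense'
    layer_index = name.split('fc1')[0].split('.')[-2]  # -> '3'
    assert mapped_key.replace('*', layer_index) == 'encoder.layers.3.feed_forward.intermediate_dense'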
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
            # Important: change the bos & pad token ids, since the CTC blank symbol is
            # <pad> and not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
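# Usage note (added): an example invocation with the arguments defined above.
# The script name and all paths are placeholders, not values from this repo:
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --config_path ./config.json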
| 6 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase__ = 4
lowerCAmelCase__ = 3
class __lowercase (__lowerCamelCase ):
pass
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
for shard in shards:
for i in range(UpperCAmelCase__):
yield {"i": i, "shard": shard}
def __UpperCAmelCase ( ) -> List[str]:
UpperCamelCase__ : List[str] = int(os.environ['RANK'])
UpperCamelCase__ : List[str] = int(os.environ['WORLD_SIZE'])
UpperCamelCase__ : Union[str, Any] = ArgumentParser()
parser.add_argument('--streaming' , type=UpperCAmelCase__)
parser.add_argument('--local_rank' , type=UpperCAmelCase__)
parser.add_argument('--num_workers' , type=UpperCAmelCase__ , default=0)
UpperCamelCase__ : List[Any] = parser.parse_args()
UpperCamelCase__ : Optional[Any] = args.streaming
UpperCamelCase__ : Optional[int] = args.num_workers
UpperCamelCase__ : Dict = {'shards': [f'shard_{shard_idx}' for shard_idx in range(UpperCAmelCase__)]}
UpperCamelCase__ : str = IterableDataset.from_generator(UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__)
if not streaming:
UpperCamelCase__ : Any = Dataset.from_list(list(UpperCAmelCase__))
UpperCamelCase__ : Optional[int] = split_dataset_by_node(UpperCAmelCase__ , rank=UpperCAmelCase__ , world_size=UpperCAmelCase__)
UpperCamelCase__ : Dict = torch.utils.data.DataLoader(UpperCAmelCase__ , num_workers=UpperCAmelCase__)
UpperCamelCase__ : Union[str, Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCamelCase__ : int = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
UpperCamelCase__ : Any = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}')
if __name__ == "__main__":
main()
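# Usage note (added): RANK and WORLD_SIZE are read from the environment, so
# this test is meant to be started through a distributed launcher. A typical
# (illustrative) invocation with two processes would be:
#   torchrun --nproc_per_node=2 test_distributed_dataset.py --streaming True
# The script name above is a placeholder for this file's path.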
| 714 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCAmelCase_ : int=50_265 , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Any=3_072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[str]=1e-5 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Any=1_024 , UpperCAmelCase_ : Dict=128 , UpperCAmelCase_ : Any=128 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : Tuple=128 , UpperCAmelCase_ : str=64 , UpperCAmelCase_ : Dict=256 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : str=224 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(
vocab_size=UpperCAmelCase_ , hidden_size=UpperCAmelCase_ , num_hidden_layers=UpperCAmelCase_ , num_attention_heads=UpperCAmelCase_ , intermediate_size=UpperCAmelCase_ , hidden_act=UpperCAmelCase_ , hidden_dropout_prob=UpperCAmelCase_ , attention_probs_dropout_prob=UpperCAmelCase_ , max_position_embeddings=UpperCAmelCase_ , type_vocab_size=UpperCAmelCase_ , initializer_range=UpperCAmelCase_ , layer_norm_eps=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : str = max_ad_position_embeddings
UpperCamelCase__ : Tuple = coordinate_size
UpperCamelCase__ : Any = shape_size
UpperCamelCase__ : List[Any] = has_relative_attention_bias
UpperCamelCase__ : List[str] = rel_pos_bins
UpperCamelCase__ : Union[str, Any] = max_rel_pos
UpperCamelCase__ : Any = has_spatial_attention_bias
UpperCamelCase__ : Optional[Any] = rel_ad_pos_bins
UpperCamelCase__ : Optional[Any] = max_rel_ad_pos
UpperCamelCase__ : Union[str, Any] = text_embed
UpperCamelCase__ : Dict = visual_embed
UpperCamelCase__ : List[Any] = input_size
UpperCamelCase__ : Optional[Any] = num_channels
UpperCamelCase__ : int = patch_size
UpperCamelCase__ : Any = classifier_dropout
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.12''' )
@property
def __UpperCamelCase ( self : Tuple):
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
])
@property
def __UpperCamelCase ( self : Any):
return 1e-5
@property
def __UpperCamelCase ( self : str):
return 12
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] = -1 , UpperCAmelCase_ : Optional[int] = -1 , UpperCAmelCase_ : Optional[Any] = False , UpperCAmelCase_ : int = None , UpperCAmelCase_ : List[Any] = 3 , UpperCAmelCase_ : Dict = 40 , UpperCAmelCase_ : Optional[Any] = 40 , ):
setattr(processor.image_processor , 'apply_ocr' , UpperCAmelCase_)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ : Any = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ : Tuple = processor.tokenizer.num_special_tokens_to_add(UpperCAmelCase_)
UpperCamelCase__ : Tuple = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_)
        # Generate dummy inputs according to the computed batch and sequence lengths
UpperCamelCase__ : int = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ : int = [[[48, 84, 73, 128]]] * batch_size
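        # (Added note: LayoutLM-style boxes follow the (x0, y0, x1, y1) convention, normalized to a 0-1000 grid.)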
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ : Union[str, Any] = self._generate_dummy_images(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = dict(
processor(
UpperCAmelCase_ , text=UpperCAmelCase_ , boxes=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , ))
return inputs
| 715 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
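# Hedged illustration (added; not part of the original file): the mapping
# built above is a bijection from all 256 byte values onto printable unicode
# characters, so byte-level BPE never has to handle raw control bytes.
def _demo_bytes_to_unicode() -> None:
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256 and len(set(byte_encoder.values())) == 256
    assert byte_encoder[ord('A')] == 'A'  # printable ASCII maps to itself
    assert byte_encoder[ord(' ')] == chr(0x120)  # a space becomes 'Ġ'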
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
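# Hedged illustration (added): get_pairs collects the set of adjacent symbol
# pairs that the BPE merge loop below ranks and merges.
def _demo_get_pairs() -> None:
    assert get_pairs(('h', 'e', 'l', 'l', 'o')) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}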
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done inside Blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses already contain the leading space.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
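# Hedged standalone illustration (added; not part of the original file): one
# iteration of the merge loop implemented in the bpe method above, applied by
# hand to the best-ranked bigram ('l', 'l').
def _demo_bpe_merge_step() -> None:
    word, best = ('h', 'e', 'l', 'l', 'o'), ('l', 'l')
    new_word, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            new_word.append(word[i] + word[i + 1])  # merge the ranked pair
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    assert tuple(new_word) == ('h', 'e', 'll', 'o')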
| 6 | 0 |
'''simple docstring'''
lowerCAmelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
    # BFS over the residual graph: returns True if the sink t is reachable from s, recording each node's predecessor along the way.
UpperCamelCase__ : List[Any] = [False] * len(lowerCamelCase_)
UpperCamelCase__ : List[str] = [s]
UpperCamelCase__ : List[Any] = True
while queue:
UpperCamelCase__ : Union[str, Any] = queue.pop(0)
for ind in range(len(graph[u])):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCamelCase_)
UpperCamelCase__ : Any = True
UpperCamelCase__ : Dict = u
return visited[t]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : Dict = [-1] * (len(lowerCamelCase_))
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : Optional[Any] = []
    UpperCamelCase__ : int = [i[:] for i in graph] # Keep a copy of the original capacities.
while bfs(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Tuple = float('Inf')
UpperCamelCase__ : Any = sink
while s != source:
            # Find the minimum residual capacity along the selected augmenting path
UpperCamelCase__ : int = min(lowerCamelCase_ , graph[parent[s]][s])
UpperCamelCase__ : Optional[int] = parent[s]
max_flow += path_flow
UpperCamelCase__ : Any = sink
while v != source:
UpperCamelCase__ : Optional[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCamelCase__ : Union[str, Any] = parent[v]
for i in range(len(lowerCamelCase_)):
for j in range(len(graph[0])):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j))
return res
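# Hedged companion sketch (added; not part of the original module): the same
# augmenting-path loop as mincut above, but returning the maximum flow value
# instead of the saturated edges. It reuses the bfs helper (referenced by its
# original name, as mincut does) and works on a copy so the input is not mutated.
def max_flow_value(graph, source, sink):
    residual = [row[:] for row in graph]
    parent = [-1] * len(residual)
    flow = 0
    while bfs(residual, source, sink, parent):
        path_flow = float('Inf')
        v = sink
        while v != source:  # bottleneck capacity along the found path
            path_flow = min(path_flow, residual[parent[v]][v])
            v = parent[v]
        flow += path_flow
        v = sink
        while v != source:  # update forward and backward residual capacities
            u = parent[v]
            residual[u][v] -= path_flow
            residual[v][u] += path_flow
            v = u
    return flow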
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 716 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
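# Note (added): the CSS class name used above is tied to a past Yahoo Finance
# page layout; scraping selectors like this break whenever the site's markup
# changes, so treat the class string as an illustration, not a stable API.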
| 6 | 0 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : Union[str, Any] = SwinConfig.from_pretrained(
'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'])
UpperCamelCase__ : str = MaskFormerConfig(backbone_config=lowerCamelCase_)
UpperCamelCase__ : Any = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
UpperCamelCase__ : List[str] = 847
UpperCamelCase__ : str = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
UpperCamelCase__ : Any = 150
UpperCamelCase__ : Union[str, Any] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
UpperCamelCase__ : List[str] = 171
UpperCamelCase__ : str = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
UpperCamelCase__ : Tuple = 133
UpperCamelCase__ : List[str] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
UpperCamelCase__ : Dict = 19
UpperCamelCase__ : Tuple = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
UpperCamelCase__ : List[str] = 65
UpperCamelCase__ : str = '''mapillary-vistas-id2label.json'''
UpperCamelCase__ : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset') , 'r'))
UpperCamelCase__ : str = {int(lowerCamelCase_): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
UpperCamelCase__ : str = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight'))
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias'))
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight'))
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias'))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'))
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'))
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight'))
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight'))
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias'))
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight'))
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias'))
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias'))
for source_index, target_index in zip(range(3 , 0 , -1) , range(0 , 3)):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight'))
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight'))
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias'))
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight'))
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight'))
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias'))
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight'))
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias'))
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight'))
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias'))
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight'))
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias'))
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight'))
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias'))
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight'))
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias'))
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight'))
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias'))
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight'))
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias'))
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight'))
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias'))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight'))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias'))
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias'))
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight'))
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias'))
for i in range(3):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight'))
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias'))
# fmt: on
return rename_keys
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]:
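    # Remove the value stored under the old key and re-insert it under the new key.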
UpperCamelCase__ : str = dct.pop(lowerCamelCase_)
UpperCamelCase__ : Tuple = val
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[str]:
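    # Split each Swin attention block's fused qkv projection into the separate query/key/value
    # weights and biases expected by the HF implementation.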
UpperCamelCase__ : int = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
for i in range(len(backbone_config.depths)):
UpperCamelCase__ : List[str] = num_features[i]
for j in range(backbone_config.depths[i]):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase__ : int = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
UpperCamelCase__ : List[Any] = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[Any] = in_proj_weight[:dim, :]
UpperCamelCase__ : Optional[Any] = in_proj_bias[: dim]
UpperCamelCase__ : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase__ : int = in_proj_bias[
dim : dim * 2
]
UpperCamelCase__ : Any = in_proj_weight[
-dim :, :
]
UpperCamelCase__ : str = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
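    # Split the transformer decoder's fused in_proj weight and bias into separate q/k/v entries
    # for both the self-attention and cross-attention layers.
    # fmt: off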
UpperCamelCase__ : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ : Tuple = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
UpperCamelCase__ : Optional[Any] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : int = in_proj_weight[: hidden_size, :]
        UpperCamelCase__ : Any = in_proj_bias[: hidden_size]
UpperCamelCase__ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ : Any = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ : int = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ : str = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ : Dict = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
UpperCamelCase__ : Optional[Any] = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : Dict = in_proj_weight[: hidden_size, :]
        UpperCamelCase__ : Tuple = in_proj_bias[: hidden_size]
UpperCamelCase__ : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ : Union[str, Any] = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> Dict:
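    # Download the standard COCO image used to sanity-check the converted model's outputs.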
UpperCamelCase__ : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False) -> int:
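    # End-to-end conversion: load the pickled original state dict, rename keys, split the fused
    # qkv projections, verify logits on a demo image, then optionally save and push to the hub.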
UpperCamelCase__ : Union[str, Any] = get_maskformer_config(lowerCamelCase_)
# load original state_dict
with open(lowerCamelCase_ , 'rb') as f:
        UpperCamelCase__ : Dict = pickle.load(f)
UpperCamelCase__ : List[Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCamelCase__ : Optional[int] = create_rename_keys(lowerCamelCase_)
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
read_in_swin_q_k_v(lowerCamelCase_ , config.backbone_config)
read_in_decoder_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# update to torch tensors
for key, value in state_dict.items():
UpperCamelCase__ : int = torch.from_numpy(lowerCamelCase_)
# load 🤗 model
UpperCamelCase__ : Dict = MaskFormerForInstanceSegmentation(lowerCamelCase_)
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase_ , param.shape)
    UpperCamelCase__, UpperCamelCase__ : Any = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_)
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase_) == 0, f'Unexpected keys: {unexpected_keys}'
# verify results
UpperCamelCase__ : List[Any] = prepare_img()
if "vistas" in model_name:
UpperCamelCase__ : Optional[Any] = 65
elif "cityscapes" in model_name:
UpperCamelCase__ : int = 65_535
else:
UpperCamelCase__ : str = 255
UpperCamelCase__ : Optional[Any] = True if '''ade''' in model_name else False
UpperCamelCase__ : Tuple = MaskFormerImageProcessor(ignore_index=lowerCamelCase_ , reduce_labels=lowerCamelCase_)
UpperCamelCase__ : Tuple = image_processor(lowerCamelCase_ , return_tensors='pt')
UpperCamelCase__ : Any = model(**lowerCamelCase_)
print('Logits:' , outputs.class_queries_logits[0, :3, :3])
if model_name == "maskformer-swin-tiny-ade":
UpperCamelCase__ : Dict = torch.tensor(
[[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]])
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase_ , atol=1e-4)
print('Looks ok!')
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}')
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
model.save_pretrained(lowerCamelCase_)
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model and image processor to the hub...')
model.push_to_hub(f'nielsr/{model_name}')
image_processor.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]:
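    # Query the LAION-400M index via clip-retrieval for the class prompt, enlarging the request
    # until enough candidates come back, then download the images plus their captions and URLs.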
UpperCamelCase__ : str = 1.5
UpperCamelCase__ : Dict = int(factor * num_class_images)
UpperCamelCase__ : Optional[int] = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=snake_case_ , aesthetic_weight=0.1)
os.makedirs(f'{class_data_dir}/images' , exist_ok=snake_case_)
if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
return
while True:
UpperCamelCase__ : int = client.query(text=snake_case_)
if len(snake_case_) >= factor * num_class_images or num_images > 1e4:
break
else:
UpperCamelCase__ : Dict = int(factor * num_images)
UpperCamelCase__ : Any = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=snake_case_ , aesthetic_weight=0.1 , )
UpperCamelCase__ : int = 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Dict = tqdm(desc='downloading real regularization images' , total=snake_case_)
    with open(f'{class_data_dir}/caption.txt' , 'w') as fa, open(f'{class_data_dir}/urls.txt' , 'w') as fb, open(
        f'{class_data_dir}/images.txt' , 'w') as fc:
while total < num_class_images:
UpperCamelCase__ : Dict = class_images[count]
count += 1
try:
UpperCamelCase__ : Union[str, Any] = requests.get(images['url'])
if img.status_code == 200:
UpperCamelCase__ : List[str] = Image.open(BytesIO(img.content))
with open(f'{class_data_dir}/images/{total}.jpg' , 'wb') as f:
f.write(img.content)
                    fa.write(images['caption'] + '\n')
                    fb.write(images['url'] + '\n')
                    fc.write(f'{class_data_dir}/images/{total}.jpg' + '\n')
total += 1
pbar.update(1)
else:
continue
except Exception:
continue
return
def __UpperCAmelCase ( ) -> List[str]:
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser('' , add_help=snake_case_)
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=snake_case_ , type=snake_case_)
parser.add_argument('--class_data_dir' , help='path to save images' , required=snake_case_ , type=snake_case_)
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=snake_case_)
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
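    # Pure-Python SHA-256: pad the message, then run the 64-round compression function
    # over each 64-byte block. Verified against hashlib in the test below.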
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
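        # Pad with 0x80 and zero bytes to 56 mod 64, then append the original
        # message length in bits as a big-endian 64-bit integer.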
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
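        # Rotate a 32-bit integer right by the given number of bit positions.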
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class __lowercase (lowercase_ ):
_lowerCamelCase = '''bloom'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {
'''num_hidden_layers''': '''n_layer''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : List[str] , UpperCAmelCase_ : Dict=250_880 , UpperCAmelCase_ : Any=64 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Tuple=8 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=1 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=1 , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[str] , ):
UpperCamelCase__ : Any = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase__ : int = kwargs.pop('n_embed' , UpperCAmelCase_)
UpperCamelCase__ : List[Any] = hidden_size if n_embed is None else n_embed
UpperCamelCase__ : Union[str, Any] = n_layer
UpperCamelCase__ : Tuple = n_head
UpperCamelCase__ : List[Any] = layer_norm_epsilon
UpperCamelCase__ : Any = initializer_range
UpperCamelCase__ : List[Any] = use_cache
UpperCamelCase__ : Dict = pretraining_tp
UpperCamelCase__ : Optional[Any] = apply_residual_connection_post_layernorm
UpperCamelCase__ : Dict = hidden_dropout
UpperCamelCase__ : Tuple = attention_dropout
UpperCamelCase__ : List[str] = bos_token_id
UpperCamelCase__ : Optional[Any] = eos_token_id
UpperCamelCase__ : Tuple = slow_but_exact
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
class __lowercase (lowercase_ ):
_lowerCamelCase = version.parse('''1.12''' )
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple = "default" , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = False , ):
super().__init__(UpperCAmelCase_ , task=UpperCAmelCase_ , patching_specs=UpperCAmelCase_ , use_past=UpperCAmelCase_)
if not getattr(self._config , 'pad_token_id' , UpperCAmelCase_):
# TODO: how to do that better?
UpperCamelCase__ : Dict = 0
@property
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Any = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs' , inverted_values_shape=UpperCAmelCase_)
UpperCamelCase__ : Tuple = {0: 'batch', 1: 'past_sequence + sequence'}
else:
UpperCamelCase__ : List[str] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __UpperCamelCase ( self : Optional[Any]):
return self._config.n_layer
@property
def __UpperCamelCase ( self : int):
return self._config.n_head
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-3
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : Dict = -1 , UpperCAmelCase_ : Any = False , UpperCAmelCase_ : List[str] = None , ):
UpperCamelCase__ : Optional[int] = super(UpperCAmelCase_ , self).generate_dummy_inputs(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_)
# We need to order the input in the way they appears in the forward()
UpperCamelCase__ : Optional[Any] = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
UpperCamelCase__, UpperCamelCase__ : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCamelCase__ : List[Any] = seqlen + 2
UpperCamelCase__ : List[Any] = self._config.hidden_size // self.num_attention_heads
UpperCamelCase__ : List[Any] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCamelCase__ : str = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCamelCase__ : List[str] = [
(torch.zeros(UpperCAmelCase_), torch.zeros(UpperCAmelCase_)) for _ in range(self.num_layers)
]
UpperCamelCase__ : Union[str, Any] = common_inputs['attention_mask']
if self.use_past:
UpperCamelCase__ : Tuple = ordered_inputs['attention_mask'].dtype
UpperCamelCase__ : Any = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_)] , dim=1)
return ordered_inputs
@property
def __UpperCamelCase ( self : List[str]):
        return 13
| 719 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
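    # Built-in potential of a p-n junction in volts: V = (kT / q) * ln(N_d * N_a / n_i^2),
    # where the division by the electron-volt constant converts joules to volts.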
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
 'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
 # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = """wavlm"""
def __init__( self : int , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : int=1e-5 , UpperCAmelCase_ : Optional[int]="group" , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Tuple=128 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : int=320 , UpperCAmelCase_ : Optional[int]=800 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]=0.05 , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : str=320 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : int=100 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : List[str]="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 1_500) , UpperCAmelCase_ : Optional[int]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : List[str]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : List[str]=80 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Dict=None , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
UpperCamelCase__ : Dict = hidden_size
UpperCamelCase__ : Dict = feat_extract_norm
UpperCamelCase__ : List[str] = feat_extract_activation
UpperCamelCase__ : Optional[int] = list(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = list(UpperCAmelCase_)
UpperCamelCase__ : Tuple = list(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = conv_bias
UpperCamelCase__ : Tuple = num_buckets
UpperCamelCase__ : Tuple = max_bucket_distance
UpperCamelCase__ : str = num_conv_pos_embeddings
UpperCamelCase__ : List[Any] = num_conv_pos_embedding_groups
UpperCamelCase__ : int = len(self.conv_dim)
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Any = hidden_act
UpperCamelCase__ : List[Any] = num_attention_heads
UpperCamelCase__ : Optional[Any] = hidden_dropout
UpperCamelCase__ : int = attention_dropout
UpperCamelCase__ : Union[str, Any] = activation_dropout
UpperCamelCase__ : Tuple = feat_proj_dropout
UpperCamelCase__ : Optional[int] = final_dropout
UpperCamelCase__ : List[str] = layerdrop
UpperCamelCase__ : Optional[Any] = layer_norm_eps
UpperCamelCase__ : Tuple = initializer_range
UpperCamelCase__ : str = num_ctc_classes
UpperCamelCase__ : Tuple = vocab_size
UpperCamelCase__ : Optional[int] = do_stable_layer_norm
UpperCamelCase__ : Any = use_weighted_layer_sum
UpperCamelCase__ : str = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ : Any = apply_spec_augment
UpperCamelCase__ : Optional[Any] = mask_time_prob
UpperCamelCase__ : Optional[int] = mask_time_length
UpperCamelCase__ : List[Any] = mask_time_min_masks
UpperCamelCase__ : Optional[Any] = mask_feature_prob
UpperCamelCase__ : List[Any] = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCamelCase__ : Optional[Any] = num_codevectors_per_group
UpperCamelCase__ : Optional[Any] = num_codevector_groups
UpperCamelCase__ : Union[str, Any] = contrastive_logits_temperature
UpperCamelCase__ : List[Any] = num_negatives
UpperCamelCase__ : Any = codevector_dim
UpperCamelCase__ : Optional[int] = proj_codevector_dim
UpperCamelCase__ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCamelCase__ : Optional[Any] = ctc_loss_reduction
UpperCamelCase__ : Any = ctc_zero_infinity
# adapter
UpperCamelCase__ : Optional[int] = add_adapter
UpperCamelCase__ : List[Any] = adapter_kernel_size
UpperCamelCase__ : List[str] = adapter_stride
UpperCamelCase__ : int = num_adapter_layers
UpperCamelCase__ : Union[str, Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ : List[str] = list(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = list(UpperCAmelCase_)
UpperCamelCase__ : Dict = list(UpperCAmelCase_)
UpperCamelCase__ : Any = xvector_output_dim
@property
def __UpperCamelCase ( self : str):
return functools.reduce(operator.mul , self.conv_stride , 1)
| 720 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
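    # Recursively collect the shapes of every tensor in a nested dict/list/tuple tree.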
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
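    # Convert a flat index back into the multi-dimensional index it denotes for the given dims.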
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
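    # Materialize the [flat_start, flat_end) range of the flattened leading batch dims as one
    # tensor, using a minimal set of slices instead of flattening the whole input.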
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
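    # Apply the layer chunk-by-chunk over the flattened batch dimensions, writing each chunk's
    # output into a pre-allocated buffer to cap peak memory usage.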
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
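        # Binary-search the largest power-of-two chunk size (capped at max_chunk_size)
        # that executes without raising a RuntimeError (e.g. out-of-memory).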
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
# Otherwise, we can reuse the precomputed value
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
if not isinstance(lowerCamelCase_ , lowerCamelCase_) or number < 0:
raise ValueError('Input must be a non-negative integer')
UpperCamelCase__ : Union[str, Any] = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# add 48 0-ed integers
words += [0] * 48
            UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
                UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror( self , value : int , rotations : int):
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
        data = bytes('Test String' , 'utf-8')
        self.assertEqual(SHA256(data).hash , hashlib.sha256(data).hexdigest())
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8')
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 700 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
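# Halve the disk size of a saved state dict (e.g. pytorch_model.bin) by casting every tensor to float16.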
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path , map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v , torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 6 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
    def __UpperCamelCase ( self : Optional[Any] , **kwargs : Any):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).image_processor
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Union[str, Any]):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : int = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.prepare_image_inputs()
UpperCamelCase__ : Optional[Any] = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : str = processor(images=UpperCAmelCase_ , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_torch
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : str = [torch.ones((1, 3, 5, 5))]
UpperCamelCase__ : List[Any] = [[1_764, 2_646]]
UpperCamelCase__ : Optional[int] = [[683, 1_024]]
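        # post_process_masks upsamples the low-res masks back to each original image size (here 1764x2646)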
UpperCamelCase__ : int = processor.post_process_masks(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : Tuple = processor.post_process_masks(
UpperCAmelCase_ , torch.tensor(UpperCAmelCase_) , torch.tensor(UpperCAmelCase_))
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
# should also work with np
UpperCamelCase__ : Union[str, Any] = [np.ones((1, 3, 5, 5))]
UpperCamelCase__ : Optional[int] = processor.post_process_masks(UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_))
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase_):
UpperCamelCase__ : List[Any] = processor.post_process_masks(UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_))
@require_vision
@require_tf
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
    def __UpperCamelCase ( self : Optional[Any] , **kwargs : Union[str, Any]):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).image_processor
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : int):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Tuple = SamProcessor(image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : Optional[Any] = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[Any] = processor(images=UpperCAmelCase_ , return_tensors='np')
input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
@require_tf
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Tuple = self.get_image_processor()
UpperCamelCase__ : Optional[int] = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [tf.ones((1, 3, 5, 5))]
UpperCamelCase__ : Optional[int] = [[1_764, 2_646]]
UpperCamelCase__ : Optional[int] = [[683, 1_024]]
UpperCamelCase__ : Optional[Any] = processor.post_process_masks(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : int = processor.post_process_masks(
UpperCAmelCase_ , tf.convert_to_tensor(UpperCAmelCase_) , tf.convert_to_tensor(UpperCAmelCase_) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
# should also work with np
UpperCamelCase__ : List[Any] = [np.ones((1, 3, 5, 5))]
UpperCamelCase__ : int = processor.post_process_masks(
UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_) , return_tensors='tf')
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646))
UpperCamelCase__ : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError):
UpperCamelCase__ : List[Any] = processor.post_process_masks(
UpperCAmelCase_ , np.array(UpperCAmelCase_) , np.array(UpperCAmelCase_) , return_tensors='tf')
@require_vision
@require_torchvision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
    def __UpperCamelCase ( self : List[str] , **kwargs : List[str]):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).image_processor
def __UpperCamelCase ( self : Dict):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Optional[int]):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : int = SamProcessor(image_processor=UpperCAmelCase_)
        UpperCamelCase__ : str = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.float32)
UpperCamelCase__ : List[Any] = [tf.convert_to_tensor(UpperCAmelCase_)]
UpperCamelCase__ : str = [torch.tensor(UpperCAmelCase_)]
UpperCamelCase__ : List[Any] = [[1_764, 2_646]]
UpperCamelCase__ : List[Any] = [[683, 1_024]]
UpperCamelCase__ : Any = processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='tf')
UpperCamelCase__ : Tuple = processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , return_tensors='pt')
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Any = SamProcessor(image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='pt')['pixel_values'].numpy()
UpperCamelCase__ : Tuple = processor(images=UpperCAmelCase_ , return_tensors='pt')['pixel_values'].numpy()
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='tf')['pixel_values'].numpy()
UpperCamelCase__ : Dict = processor(images=UpperCAmelCase_ , return_tensors='tf')['pixel_values'].numpy()
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_))
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_))
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_))
| 701 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (PretrainedConfig ):
    model_type = '''segformer'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage' , True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase (OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
| 6 | 0 |
'''simple docstring'''
class __lowercase :
    def __init__( self , set_counts : list):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge( self , src : int , dst : int):
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size)
return True
    def get_parent( self , disj_set : int):
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
return self.parents[disj_set]
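# A minimal usage sketch (hypothetical data): merge three singleton sets and track the largest size.
#     ds = __lowercase([1, 1, 1])
#     ds.merge(0, 1)
#     ds.merge(1, 2)
#     ds.max_set  # -> 3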
| 702 |
'''simple docstring'''
def __UpperCAmelCase ( sentence : str , ngram_size : int) -> list[str]:
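    """Create all n-grams of length ``ngram_size`` from ``sentence``.

    >>> __UpperCAmelCase('abc', 2)
    ['ab', 'bc']
    """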
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 0 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
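# Each decoded segment spans TARGET_FEATURE_LENGTH spectrogram frames; the pipeline
# below generates segments one at a time and concatenates them into the full song.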
class __lowercase (DiffusionPipeline ):
    _optional_components = ['''melgan''']
    def __init__( self , notes_encoder : SpectrogramNotesEncoder , continuous_encoder : SpectrogramContEncoder , decoder : TaFilmDecoder , scheduler : DDPMScheduler , melgan : OnnxRuntimeModel if is_onnx_available() else Any , ):
super().__init__()
# From MELGAN
        self.min_value = math.log(1e-5) # Matches MelGAN training.
        self.max_value = 4.0 # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False):
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False):
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask)
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask)
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps)
return logits
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase_ : List[List[int]] , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "numpy" , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCAmelCase_)}.')
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device)
for i, encoder_input_tokens in enumerate(UpperCAmelCase_):
if i == 0:
UpperCamelCase__ : List[str] = torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device , dtype=self.decoder.dtype)
# The first chunk has no previous context.
                UpperCamelCase__ : Any = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCamelCase__ : Union[str, Any] = ones
UpperCamelCase__ : Tuple = self.scale_features(
UpperCAmelCase_ , output_range=[-1.0, 1.0] , clip=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=UpperCAmelCase_ , continuous_mask=UpperCAmelCase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCamelCase__ : List[Any] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCAmelCase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase_)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
UpperCamelCase__ : Any = self.decode(
encodings_and_masks=UpperCAmelCase_ , input_tokens=UpperCAmelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
UpperCamelCase__ : Optional[int] = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
UpperCamelCase__ : List[str] = self.scale_to_features(UpperCAmelCase_ , input_range=[-1.0, 1.0])
UpperCamelCase__ : List[str] = mel[:1]
UpperCamelCase__ : int = mel.cpu().float().numpy()
UpperCamelCase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_ , UpperCAmelCase_)
            logger.info('Generated segment' , i)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.')
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.')
if output_type == "numpy":
UpperCamelCase__ : Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
else:
UpperCamelCase__ : Optional[int] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCAmelCase_)
| 703 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector) -> float:
    return np.dot(vector , vector)
class __lowercase :
    def __init__( self , *,
        regularization : float = np.inf , kernel : str = "linear" , gamma : float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear( self , vectora : ndarray , vectorb : ndarray):
        return np.dot(vectora , vectorb)
    def __rbf( self , vectora : ndarray , vectorb : ndarray):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit( self , observations : list[ndarray] , classes : ndarray):
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n ,) = np.shape(classes)
        def to_minimize(candidate : ndarray) -> float:
            s = 0
            (n ,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes , 0 , 0)
        l_bounds = Bounds(0 , self.regularization)
        l_star = minimize(
            to_minimize , np.ones(n) , bounds=l_bounds , constraints=[ly_contraint]).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
        self.offset = s / n
    def predict( self , observation : ndarray):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
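# A minimal usage sketch (hypothetical data; labels must be in {-1, +1}):
#     svc = __lowercase(kernel='rbf', gamma=0.5, regularization=10.0)
#     svc.fit(observations=[np.array([1.0, 1.0]), np.array([-1.0, -1.0])], classes=np.array([1, -1]))
#     svc.predict(np.array([0.9, 1.1]))  # expected: 1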
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def __UpperCamelCase ( self : Optional[Any]):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCamelCase__ : Optional[int] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
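        # byte-level BPE: each merge rule joins two existing symbols, and '\u0120' marks a leading space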
UpperCamelCase__ : List[str] = {'unk_token': '<unk>'}
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def __UpperCamelCase ( self : int , **UpperCAmelCase_ : int):
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Dict):
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : str = 'lower newer'
return input_text, output_text
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
UpperCamelCase__ : int = tokenizer.tokenize(UpperCAmelCase_) # , add_prefix_space=True)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokens + [tokenizer.unk_token]
UpperCamelCase__ : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCAmelCase_) , [0, 31_414, 232, 328, 2])
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCAmelCase_) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[str] = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
UpperCamelCase__ : str = tokenizer.encode('sequence builders' , add_special_tokens=UpperCAmelCase_)
UpperCamelCase__ : int = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_)
UpperCamelCase__ : int = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Tuple = self.get_tokenizer()
UpperCamelCase__ : Union[str, Any] = 'Encode this sequence.'
UpperCamelCase__ : Union[str, Any] = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
# Testing encoder arguments
UpperCamelCase__ : List[Any] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_)
UpperCamelCase__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : List[Any] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_)
UpperCamelCase__ : int = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
tokenizer.add_special_tokens({'bos_token': '<s>'})
UpperCamelCase__ : Dict = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
UpperCamelCase__ : Any = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_)
# Testing spaces after special tokens
UpperCamelCase__ : Tuple = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_)}) # mask token has a left space
UpperCamelCase__ : Any = tokenizer.convert_tokens_to_ids(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = 'Encode <mask> sequence'
UpperCamelCase__ : Optional[Any] = 'Encode <mask>sequence'
UpperCamelCase__ : List[str] = tokenizer.encode(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = encoded.index(UpperCAmelCase_)
UpperCamelCase__ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Any = tokenizer.encode(UpperCAmelCase_)
UpperCamelCase__ : Any = encoded.index(UpperCAmelCase_)
UpperCamelCase__ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
pass
def __UpperCamelCase ( self : Optional[int]):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
UpperCamelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = 'A, <mask> AllenNLP sentence.'
UpperCamelCase__ : Any = tokenizer_r.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = tokenizer_p.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , )
UpperCamelCase__ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
UpperCamelCase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
self.assertSequenceEqual(
UpperCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
self.assertSequenceEqual(
UpperCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
def __UpperCamelCase ( self : Tuple):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
UpperCamelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
UpperCamelCase__ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , UpperCAmelCase_)
self.assertEqual(post_processor_state['add_prefix_space'] , UpperCAmelCase_)
self.assertEqual(post_processor_state['trim_offsets'] , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
UpperCamelCase__ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ : str = F'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : Any = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase_) + 1, len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , )
UpperCamelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : str = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase_) + 1, len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , )
UpperCamelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase_), len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , )
UpperCamelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_)))
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase_), len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , )
UpperCamelCase__ : Dict = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCamelCase__ : int = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : Any = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase_)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase_) + 1, 1 + len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , )
UpperCamelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : Dict = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase_)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase_), 1 + len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , )
UpperCamelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase_ , use_fast=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase_)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase_), 1 + len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , )
| 704 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url) -> Any:
    config = DPTConfig()
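    # "large" checkpoints use a ViT-L/16 backbone, so widen the transformer and the neck below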
if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset')) , 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict) -> Optional[Any]:
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key( name) -> Optional[Any]:
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection')
    if "blocks" in name:
        name = name.replace('blocks' , 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head')
    if "scratch" in name:
        name = name.replace('scratch' , 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt')
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm')
    if "head" in name:
        name = name.replace('head' , 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head')
    return name
def read_in_q_k_v( state_dict , config) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img( ) -> Optional[Any]:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name) -> Dict:
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')
# remove certain keys
    remove_ignore_keys_(state_dict)
# rename keys
for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
# read in qkv matrices
    read_in_q_k_v(state_dict , config)
# load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
model.eval()
# Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt')
# forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
# Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 0 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links( workflow_run_id , token=None) -> Tuple:
    headers = None
if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers).json()
    job_links = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']})
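        # the jobs endpoint returns at most 100 entries per page, so page through the rest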
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}' , headers=headers).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']})
return job_links
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
return {}
def get_artifacts_links( worflow_run_id , token=None) -> Any:
    headers = None
if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
    result = requests.get(url , headers=headers).json()
    artifacts = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}' , headers=headers).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
return artifacts
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
return {}
def download_artifact( artifact_name , artifact_url , output_dir , token) -> Tuple:
    headers = None
if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url , allow_redirects=True)
    file_path = os.path.join(output_dir , f'{artifact_name}.zip')
    with open(file_path , 'wb') as fp:
fp.write(response.content)
def get_errors_from_single_artifact( artifact_zip_path , job_links=None) -> List[str]:
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
for filename in z.namelist():
            if not os.path.isdir(filename):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                with z.open(filename) as f:
for line in f:
                        line = line.decode('UTF-8').strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
                                error_line = line[: line.index(': ')]
                                error = line[line.index(': ') + len(': ') :]
errors.append([error_line, error])
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED '):
# `test` is the test method that failed
                            failed_test = line[len('FAILED ') :]
                            failed_tests.append(failed_test)
elif filename == "job_name.txt":
                            job_name = line
    if len(errors) != len(failed_tests):
raise ValueError(
            f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '
            f'and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.')
    job_link = None
if job_name and job_links:
        job_link = job_links.get(job_name , None)
# A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests)]
return result
def get_all_errors( artifact_dir , job_links=None) -> Any:
    errors = []
    paths = [os.path.join(artifact_dir , p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links))
return errors
def reduce_by_error( logs , error_filter=None) -> Any:
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
return r
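# Note (added for clarity): the returned dict maps each error message to
# {'count': <int>, 'failed_tests': [(failed_test, error_line), ...]},
# sorted by count.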
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : List[str] = test.split('::')[0]
if test.startswith('tests/models/'):
UpperCamelCase__ : Optional[int] = test.split('/')[2]
else:
UpperCamelCase__ : int = None
return test
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=None) -> int:
UpperCamelCase__ : Tuple = [(x[0], x[1], get_model(x[2])) for x in logs]
UpperCamelCase__ : Tuple = [x for x in logs if x[2] is not None]
UpperCamelCase__ : List[Any] = {x[2] for x in logs}
UpperCamelCase__ : Union[str, Any] = {}
for test in tests:
UpperCamelCase__ : List[Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test])
UpperCamelCase__ : int = counter.most_common()
UpperCamelCase__ : List[Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCamelCase__ : Tuple = sum(error_counts.values())
if n_errors > 0:
UpperCamelCase__ : int = {'count': n_errors, 'errors': error_counts}
    UpperCamelCase__ : str = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=lowerCamelCase_))
return r
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : Optional[int] = '| no. | error | status |'
UpperCamelCase__ : Dict = '|-:|:-|:-|'
UpperCamelCase__ : str = [header, sep]
for error in reduced_by_error:
UpperCamelCase__ : Optional[int] = reduced_by_error[error]['count']
UpperCamelCase__ : str = f'| {count} | {error[:100]} | |'
lines.append(lowerCamelCase_)
return "\n".join(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Union[str, Any] = '| model | no. of errors | major error | count |'
UpperCamelCase__ : Dict = '|-:|-:|-:|-:|'
UpperCamelCase__ : Optional[Any] = [header, sep]
for model in reduced_by_model:
UpperCamelCase__ : Any = reduced_by_model[model]['count']
UpperCamelCase__ : int = list(reduced_by_model[model]['errors'].items())[0]
UpperCamelCase__ : str = f'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(lowerCamelCase_)
return "\n".join(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase__ = get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase__ = k.find(' / ')
lowerCAmelCase__ = k[index + len(' / ') :]
lowerCAmelCase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase__ = reduce_by_error(errors)
lowerCAmelCase__ = reduce_by_model(errors)
lowerCAmelCase__ = make_github_table(reduced_by_error)
lowerCAmelCase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 705 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
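        # e.g. with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
        # seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91  (hand-computed)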
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCAmelCase__ = random.Random()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None) -> str:
if rng is None:
UpperCamelCase__ : List[Any] = global_rng
UpperCamelCase__ : str = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
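# e.g. floats_list((2, 3)) returns a 2x3 nested list of floats drawn from
# `global_rng`, each in the range [0.0, scale).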
@require_torch
@require_torchaudio
class __lowercase (unittest.TestCase ):
def __init__( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Any=400 , UpperCAmelCase_ : List[str]=2_000 , UpperCAmelCase_ : int=24 , UpperCAmelCase_ : Dict=24 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[int]=16_000 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]=True , ):
UpperCamelCase__ : Any = parent
UpperCamelCase__ : Optional[Any] = batch_size
UpperCamelCase__ : Any = min_seq_length
UpperCamelCase__ : Optional[Any] = max_seq_length
UpperCamelCase__ : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
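        # e.g. with the defaults above: (2_000 - 400) // (7 - 1) = 266  (hand-computed)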
UpperCamelCase__ : List[str] = feature_size
UpperCamelCase__ : Optional[int] = num_mel_bins
UpperCamelCase__ : Tuple = padding_value
UpperCamelCase__ : List[str] = sampling_rate
UpperCamelCase__ : Optional[Any] = return_attention_mask
UpperCamelCase__ : int = do_normalize
def __UpperCamelCase ( self : Tuple):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Any=False):
def _flatten(UpperCAmelCase_ : List[Any]):
return list(itertools.chain(*UpperCAmelCase_))
if equal_length:
UpperCamelCase__ : str = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
UpperCamelCase__ : Union[str, Any] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
UpperCamelCase__ : Optional[Any] = [np.asarray(UpperCAmelCase_) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase (__lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = SpeechaTextFeatureExtractor if is_speech_available() else None
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[Any] = SpeechaTextFeatureExtractionTester(self)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
self.assertTrue(np.all(np.mean(UpperCAmelCase_ , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ , axis=0) - 1) < 1e-3))
def __UpperCamelCase ( self : Optional[int]):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ : Optional[int] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = [np.asarray(UpperCAmelCase_) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ : Union[str, Any] = feature_extractor(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='np').input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
# Test not batched input
UpperCamelCase__ : str = feature_extractor(speech_inputs[0] , return_tensors='np').input_features
UpperCamelCase__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='np').input_features
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3))
# Test batched
UpperCamelCase__ : Tuple = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
UpperCamelCase__ : Optional[Any] = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3))
# Test 2-D numpy arrays are batched.
UpperCamelCase__ : Optional[Any] = [floats_list((1, x))[0] for x in (800, 800, 800)]
UpperCamelCase__ : Dict = np.asarray(UpperCAmelCase_)
UpperCamelCase__ : str = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
UpperCamelCase__ : Tuple = feature_extractor(UpperCAmelCase_ , return_tensors='np').input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3))
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : Union[str, Any] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : Union[str, Any] = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase__ : Tuple = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Optional[Any] = feature_extractor(
UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_)
UpperCamelCase__ : List[str] = inputs.input_features
UpperCamelCase__ : int = inputs.attention_mask
UpperCamelCase__ : Optional[Any] = [np.sum(UpperCAmelCase_) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : str = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase__ : Union[str, Any] = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Dict = feature_extractor(
UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = inputs.input_features
UpperCamelCase__ : List[str] = inputs.attention_mask
UpperCamelCase__ : Dict = [np.sum(UpperCAmelCase_) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : Optional[Any] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = feature_extractor(
UpperCAmelCase_ , padding='max_length' , max_length=4 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
UpperCamelCase__ : Tuple = inputs.input_features
UpperCamelCase__ : str = inputs.attention_mask
UpperCamelCase__ : Tuple = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1])
self._check_zero_mean_unit_variance(input_features[2])
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : str = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : int = feature_extractor(
UpperCAmelCase_ , padding='longest' , max_length=4 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
UpperCamelCase__ : int = inputs.input_features
UpperCamelCase__ : Any = inputs.attention_mask
UpperCamelCase__ : Optional[int] = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24))
UpperCamelCase__ : Optional[Any] = [floats_list((1, x))[0] for x in range(800 , 1_400 , 200)]
UpperCamelCase__ : Optional[Any] = feature_extractor(
UpperCAmelCase_ , padding='longest' , max_length=16 , truncation=UpperCAmelCase_ , return_tensors='np' , return_attention_mask=UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = inputs.input_features
UpperCamelCase__ : int = inputs.attention_mask
UpperCamelCase__ : Tuple = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape , (3, 6, 24))
def __UpperCamelCase ( self : Any):
import torch
UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : Dict = np.random.rand(100 , 32).astype(np.floataa)
UpperCamelCase__ : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ : List[str] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_features.dtype == np.floataa)
UpperCamelCase__ : int = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Tuple):
from datasets import load_dataset
UpperCamelCase__ : Tuple = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation')
# automatic decoding with librispeech
UpperCamelCase__ : List[str] = ds.sort('id').select(range(UpperCAmelCase_))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self : List[Any]):
# fmt: off
UpperCamelCase__ : Any = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
])
# fmt: on
UpperCamelCase__ : List[str] = self._load_datasamples(1)
UpperCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
UpperCamelCase__ : int = feature_extractor(UpperCAmelCase_ , return_tensors='pt').input_features
        self.assertEqual(input_features.shape , (1, 584, 24))
self.assertTrue(np.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1e-4))
| 706 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase__ = ['text', 'image', 'audio']
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase__ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input')
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png').resize((512, 512)))
elif input_type == "audio":
inputs.append(torch.ones(3_000))
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
inputs.append(create_inputs(lowerCamelCase_))
else:
raise ValueError(f'Invalid type requested: {input_type}')
return inputs
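# e.g. create_inputs(['text', 'audio']) -> ['Text input', torch.ones(3_000)]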
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Tuple = []
for output in outputs:
if isinstance(lowerCamelCase_ , (str, AgentText)):
output_types.append('text')
elif isinstance(lowerCamelCase_ , (Image.Image, AgentImage)):
output_types.append('image')
elif isinstance(lowerCamelCase_ , (torch.Tensor, AgentAudio)):
output_types.append('audio')
else:
raise ValueError(f'Invalid output: {output}')
return output_types
@is_tool_test
class __lowercase :
def __UpperCamelCase ( self : List[str]):
self.assertTrue(hasattr(self.tool , 'inputs'))
self.assertTrue(hasattr(self.tool , 'outputs'))
UpperCamelCase__ : int = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase_):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
UpperCamelCase__ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = create_inputs(self.tool.inputs)
UpperCamelCase__ : Any = self.tool(*UpperCAmelCase_)
# There is a single output
if len(self.tool.outputs) == 1:
UpperCamelCase__ : Optional[Any] = [outputs]
self.assertListEqual(output_types(UpperCAmelCase_) , self.tool.outputs)
def __UpperCamelCase ( self : Optional[int]):
self.assertTrue(hasattr(self.tool , 'description'))
self.assertTrue(hasattr(self.tool , 'default_checkpoint'))
self.assertTrue(self.tool.description.startswith('This is a tool that'))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Any = create_inputs(self.tool.inputs)
UpperCamelCase__ : Optional[Any] = self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : int = [outputs]
self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
for output, output_type in zip(UpperCAmelCase_ , self.tool.outputs):
UpperCamelCase__ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_))
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[str] = create_inputs(self.tool.inputs)
UpperCamelCase__ : Optional[int] = []
for _input, input_type in zip(UpperCAmelCase_ , self.tool.inputs):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
UpperCamelCase__ : List[Any] = self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : int = [outputs]
        self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
| 707 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
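        # e.g. for a node with prefix 'banana', match('bandana') returns
        # ('ban', 'ana', 'dana') -- common prefix, remaining prefix, remaining word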
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
            # Case 3: The matching consumed the node's whole prefix
            # Solution: We insert the remaining word into the matching child node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
            # Case 4: Only part of the node's prefix matches the word
            # Solution: Create an intermediate node holding the common prefix, re-parent
            # the existing node under it, and add a new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : str = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase_ , architectures=['RobertaPreLayerNormForMaskedLM'])
# convert state_dict
UpperCamelCase__ : Tuple = torch.load(hf_hub_download(repo_id=lowerCamelCase_ , filename='pytorch_model.bin'))
UpperCamelCase__ : Union[str, Any] = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.'):
UpperCamelCase__ : Tuple = 'roberta_prelayernorm.' + tensor_key[len('roberta.') :]
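            # e.g. 'roberta.embeddings.word_embeddings.weight'
            #  ->  'roberta_prelayernorm.embeddings.word_embeddings.weight'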
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight') or tensor_key.endswith('.self.LayerNorm.bias'):
continue
UpperCamelCase__ : Tuple = tensor_value
UpperCamelCase__ : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase_ , config=lowerCamelCase_ , state_dict=lowerCamelCase_)
model.save_pretrained(lowerCamelCase_)
# convert tokenizer
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained(lowerCamelCase_)
tokenizer.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 708 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Any = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Optional[Any] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_))
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_))
def __UpperCamelCase ( self : str):
UpperCamelCase__ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase__ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Tuple = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase__ : Any = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def __UpperCamelCase ( self : int):
# pass variant but use the non-variant filenames
UpperCamelCase__ : Any = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
UpperCamelCase__ : Tuple = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Tuple = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase__ : Any = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
UpperCamelCase__ : Optional[int] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def __UpperCamelCase ( self : List[Any]):
# pass variant but use the non-variant filenames
UpperCamelCase__ : Union[str, Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
UpperCamelCase__ : Any = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Optional[int] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase__ : Any = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
| 709 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
# compute the shape of the output matrix
UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 0
return updated_arr
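# Hand-computed example (illustrative, not part of the original module):
# maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
# -> array([[ 6.,  8.],
#           [14., 16.]])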
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Tuple = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[Any] = 0
# compute the shape of the output matrix
UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 0
return updated_arr
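# Hand-computed example: on the same 4x4 array,
# avgpooling(..., size=2, stride=2) -> array([[ 3.,  5.],
#                                             [11., 13.]])
# (the window means 3.5, 5.5, 11.5, 13.5 are truncated by the int() cast)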
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 0 |
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # push any pending lazy assignment down before touching this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # push any pending lazy assignment down before reading this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
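    # Expected output of the demo above (1-indexed, inclusive query bounds):
    #   7    max of A[4..6]  = max(7, 3, -5)
    #   14   max of A[7..11] = max(6, 11, -20, 9, 14)
    #   15   max of A[7..12]
    #   111  after update(1..3 <- 111), the maximum over A[1..15]
    #   [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8]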
| 710 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join([str(value) for value in self.rows[0]]) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ])
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
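    # Illustrative usage of the class above:
    #   m = Matrix([[1, 2], [3, 4]])
    #   m.determinant()   # -> -2
    #   (m ** 2).rows     # -> [[7, 10], [15, 22]]
    # Note that scalar multiplication truncates through int(), so inverse()
    # is only exact when every adjugate entry divides evenly by the determinant.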
| 6 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, 'question_encoder_tokenizer')
        generator_path = os.path.join(save_directory, 'generator_tokenizer')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('config', None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder='question_encoder_tokenizer')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder='generator_tokenizer')
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details', FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
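# Illustrative usage (checkpoint name shown for illustration only):
#   tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-base')
#   inputs = tokenizer(['who holds the record in 100m freestyle?'], return_tensors='pt')
#   answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)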
| 711 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
        unet = UNet2DConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_schedule='squaredcos_cap_v2', beta_start=0.00_01, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components(self):
torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
        unet = UNet2DConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_schedule='squaredcos_cap_v2', beta_start=0.00_01, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']

        if "image" in inputs:
            image = inputs['image']
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }

        if image is not None:
            inputs['image'] = image

        if mask_image is not None:
            inputs['mask_image'] = mask_image

        if original_image is not None:
            inputs['original_image'] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)

        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f'`{optional_component}` did not stay set to None after loading.',
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']

        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }

        if image is not None:
            inputs['image'] = image

        if mask_image is not None:
            inputs['mask_image'] = mask_image

        if original_image is not None:
            inputs['original_image'] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)

        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
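# Note: this mixin assumes a concrete test class supplies `pipeline_class`,
# `get_dummy_components`, and `get_dummy_inputs`, e.g. (illustrative sketch):
#   class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#       def get_dummy_components(self):
#           return self._get_dummy_components()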
| 6 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b  # compare everything except the names
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # ONNX FLOAT
                    mem_size *= 4
                elif dtype == 6:  # ONNX INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # ONNX INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print('total reduced size: ', total_reduced_size / 1_024 / 1_024 / 1_024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = 'optimized_' + model_file_name
    onnx.save(model, os.path.join(model_file_folder, new_model))
    return new_model
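# Illustrative call (file path hypothetical):
#   optimized = remove_dup_initializers('exported/model.onnx')
#   # saves 'exported/optimized_model.onnx' and returns 'optimized_model.onnx'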
| 712 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # the private key must be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)

    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')

    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2_048)
    print('Key files generation successful')


if __name__ == "__main__":
    main()
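# Background, for reference (one standard ElGamal variant consistent with the
# key shapes above): with prime p, generator e_1 and private exponent d, the
# public value is e_2 = (e_1 ** d) ** -1 mod p. A message m encrypted with an
# ephemeral r as (c_1, c_2) = (e_1 ** r mod p, m * e_2 ** r mod p) is recovered
# through m = c_2 * c_1 ** d mod p, since c_1 ** d cancels e_2 ** r.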
| 6 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop('pixel_mean', None)
    state_dict.pop('pixel_std', None)

    output_hypernetworks_mlps_pattern = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace('layers.0', 'proj_in')
            elif layer_nb == 1:
                key = key.replace('layers.1', 'layers.0')
            elif layer_nb == 2:
                key = key.replace('layers.2', 'proj_out')

        model_state_dict[key] = value

    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f'checkpoints/{model_name}.pth')

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )

    state_dict = torch.load(checkpoint_path, map_location='cpu')
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to('cuda')

    img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors='pt').to('cuda')
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_712_603_092_193_604

        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_686_015_605_926_514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
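    # Illustrative invocation (script filename hypothetical):
    #   python convert_sam_to_hf.py --model_name sam_vit_h_4b8939 \
    #       --pytorch_dump_folder_path ./sam-vit-huge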
| 713 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
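    # Illustrative invocation (script filename and paths hypothetical):
    #   python convert_unispeech_checkpoint.py --checkpoint_path ./unispeech.pt \
    #       --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./unispeech-hf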
| 6 | 0 |
def find_min(arr):
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i items sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
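# Worked example for find_min above: for arr = [1, 6, 11, 5] the total is 23,
# and the largest achievable subset sum not exceeding 23 // 2 = 11 is 11
# (e.g. {6, 5} or {11}), so the minimum partition difference is 23 - 2 * 11 = 1.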
| 714 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
    def test_stable_diffusion_fp16(self):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
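# Note on the sld_* arguments exercised above: they drive the safety guidance
# of StableDiffusionPipelineSafe (Safe Latent Diffusion). sld_guidance_scale=0
# turns the safety term off, while the "strong" settings (large scale plus the
# warmup/threshold/momentum values) steer sampling away from unsafe concepts.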
| 6 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __lowercase (PretrainedConfig ):
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
@property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
        return self.d_model
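# With the attribute_map above, generic names resolve to the PEGASUS-specific
# fields; a usage sketch against the upstream class (PegasusConfig in
# transformers), illustrative values only:
#
#     config = PegasusConfig(d_model=1_024, encoder_layers=12)
#     assert config.hidden_size == config.d_model
#     assert config.num_attention_heads == config.encoder_attention_heads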
| 715 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs(word) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __lowercase (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False) if isinstance(unk_token , str) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> dict:
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe(self , token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self , text: str):
        bpe_tokens = []
        for token in re.findall(self.pat , text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self , token: str) -> int:
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token(self , index: int) -> str:
        return self.decoder.get(index)
    def convert_tokens_to_string(self , tokens) -> str:
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
        return text
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file , 'w' , encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self , text , is_split_into_words=False , **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self , conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids
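# Quick self-contained check of the two module-level helpers above (both are
# pure functions, so this is safe to run directly):
#
#     >>> len(bytes_to_unicode())
#     256
#     >>> sorted(get_pairs(tuple('hello')))
#     [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]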
| 6 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss']):
UpperCamelCase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Dict = 'sshleifer/tiny-gpt2'
UpperCamelCase__ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[int] = TensorFlowBenchmark(UpperCAmelCase_)
UpperCamelCase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = 'sgugger/tiny-distilbert-classification'
UpperCamelCase__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , only_pretrain_model=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = TensorFlowBenchmark(UpperCAmelCase_)
UpperCamelCase__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
UpperCamelCase__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = TensorFlowBenchmark(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : int = 'sshleifer/tiny-gpt2'
UpperCamelCase__ : int = AutoConfig.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = TensorFlowBenchmark(UpperCAmelCase_ , [config])
UpperCamelCase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = 'sshleifer/tiny-gpt2'
UpperCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : str = TensorFlowBenchmark(UpperCAmelCase_ , [config])
UpperCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : int = 'sshleifer/tiny-gpt2'
UpperCamelCase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = TensorFlowBenchmark(UpperCAmelCase_)
UpperCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Tuple = 'sshleifer/tiny-gpt2'
UpperCamelCase__ : Optional[int] = AutoConfig.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[int] = TensorFlowBenchmark(UpperCAmelCase_ , [config])
UpperCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Tuple = 'patrickvonplaten/t5-tiny-random'
UpperCamelCase__ : List[str] = AutoConfig.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : str = TensorFlowBenchmark(UpperCAmelCase_ , configs=[config])
UpperCamelCase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0 , 'Cannot do xla on CPU.')
def __UpperCamelCase ( self : int):
UpperCamelCase__ : str = 'sshleifer/tiny-gpt2'
UpperCamelCase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = TensorFlowBenchmark(UpperCAmelCase_)
UpperCamelCase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase_ , save_to_csv=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase_ , 'inf_time.csv') , inference_memory_csv_file=os.path.join(UpperCAmelCase_ , 'inf_mem.csv') , env_info_csv_file=os.path.join(UpperCAmelCase_ , 'env.csv') , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = TensorFlowBenchmark(UpperCAmelCase_)
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , 'inf_time.csv')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , 'inf_mem.csv')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , 'env.csv')).exists())
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(UpperCAmelCase_ : Union[str, Any]):
self.assertTrue(hasattr(UpperCAmelCase_ , 'sequential'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'cumulative'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'current'))
self.assertTrue(hasattr(UpperCAmelCase_ , 'total'))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase_ , 'log.txt') , log_print=UpperCAmelCase_ , trace_memory_line_by_line=UpperCAmelCase_ , eager_mode=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
UpperCamelCase__ : Any = TensorFlowBenchmark(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , 'log.txt')).exists())
| 716 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text , 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
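# Scraping Yahoo Finance is brittle: the CSS class above can change without
# notice and unauthenticated requests may be throttled. A slightly more
# defensive variant of the request (timeout value is an arbitrary choice):
#
#     response = requests.get(url, timeout=10)
#     response.raise_for_status()
#     soup = BeautifulSoup(response.text, 'html.parser')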
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
lowerCAmelCase__ = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
lowerCAmelCase__ = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCAmelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModel)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __lowercase (_BaseAutoModelClass ):
_lowerCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase__ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
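# Usage sketch: each _LazyAutoMapping resolves a config class to its Flax
# model class only when first looked up, so importing this module stays cheap.
# Illustrative only (checkpoint name is an example):
#
#     model = FlaxAutoModel.from_pretrained('bert-base-cased')
#     # -> a FlaxBertModel, selected through FLAX_MODEL_MAPPING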
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
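# To turn the logits above into a document-type label, the usual pattern is
# (RVL-CDIP has 16 classes, hence the (1, 16) logits shape):
#
#     predicted_class = logits.argmax(-1).item()
#     print(model.config.id2label[predicted_class])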
| 6 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowercase (Dataset ):
    def __init__(self , params , data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : int , UpperCAmelCase_ : List[Any]):
return (self.token_ids[index], self.lengths[index])
def __len__( self : Any):
return len(self.lengths)
    def check(self):
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(F'Splitting {sum(indices)} too long sequences.')
        def divide_chunks(l , n):
            return [l[i : i + n] for i in range(0 , len(l) , n)]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids , self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s) , sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
    def print_statistics(self):
if not self.params.is_master:
return
logger.info(F'{len(self)} sequences')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self , batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
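# batch_sequences is designed to be plugged into a DataLoader as the collate
# function; a minimal sketch (params/data construction elided):
#
#     from torch.utils.data import DataLoader
#     loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#     token_ids, lengths = next(iter(loader))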
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    def __init__(self , data: bytes):
        self.data = data
# Initialize hash values
        self.hashes = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = B'\x80' + (B'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data) , 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7)
                        ^ self.ror(words[index - 15] , 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17)
                        ^ self.ror(words[index - 2] , 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e , 6) ^ self.ror(e , 11) ^ self.ror(e , 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a , 2) ^ self.ror(a , 13) ^ self.ror(a , 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self , value: int , rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
    def testMatchHashes(self) -> None:
import hashlib
        test_string = bytes('Test String' , 'utf-8')
        self.assertEqual(SHA256(test_string).hash , hashlib.sha256(test_string).hexdigest())
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file')
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , 'utf-8')
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
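# Sanity check against a well-known SHA-256 test vector:
#
#     >>> SHA256(b'abc').hash
#     'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'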
| 6 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Tuple = XLMRobertaModel.from_pretrained('xlm-roberta-base')
UpperCamelCase__ : Optional[Any] = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
# The dog is cute and lives in the garden house
UpperCamelCase__ : Union[str, Any] = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ : List[Any] = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(UpperCAmelCase_)['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase_)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1e-3))
@slow
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : List[Any] = XLMRobertaModel.from_pretrained('xlm-roberta-large')
UpperCamelCase__ : List[str] = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
# The dog is cute and lives in the garden house
UpperCamelCase__ : Union[str, Any] = torch.Size((1, 12, 1_024)) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ : Optional[int] = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase_)
# compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1e-3))
 | 719 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float , acceptor_conc: float , intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
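# Example with typical silicon numbers (concentrations in cm^-3; n_i for Si
# at 300 K is roughly 1.5e10), giving a built-in voltage of about 0.81 V:
#
#     >>> round(builtin_voltage(1e17, 1e17, 1.5e10), 2)
#     0.81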
| 6 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
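# The pattern these tests exercise generalizes: a processor bundles a
# tokenizer and an image processor behind one __call__. A minimal usage
# sketch (checkpoint name is illustrative):
#
#     processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#     inputs = processor(text=['a photo of a cat'], images=image, return_tensors='pt')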
| 720 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree , dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree , (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree , torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int , dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
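# e.g. _flat_idx_to_idx(5, (2, 3)) == (1, 2): flat index 5 in a row-major
# 2x3 grid lands at row 1, column 2.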
@torch.jit.ignore
def _get_minimal_slice_set(start , end , dims , start_edges = None , end_edges = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims)]
        reduce_edge_list(end_edges)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0] , end[0] + 1),)]
    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
# Dimensions common to start and end can be selected directly
    for s, e in zip(start , end):
        if s == e:
            path_list.append(slice(s , s + 1))
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)
# start == end, and we're done
    if divergence_idx == len(start):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
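# Illustration of the decomposition above: for batch dims (2, 3), covering the
# inclusive range from start=(0, 1) to end=(1, 1) gives
#     _get_minimal_slice_set((0, 1), (1, 1), (2, 3))
#         == [(slice(0, 1), slice(1, 3)), (slice(1, 2), slice(0, 2))]
# i.e. the tail of row 0 plus the head of row 1: two contiguous pieces instead
# of four single-element lookups.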
@torch.jit.ignore
def _chunk_slice( t: torch.Tensor , flat_start: int , flat_end: int , no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer( layer: Callable , inputs: Dict[str, Any] , chunk_size: int , no_batch_dims: int , low_mem: bool = False , _out: Any = None , _add_into_out: bool = False , ) -> Any:
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1 , *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs , inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size) , no_batch_dims=len(orig_batch_dims) , )
        chunks: Dict[str, Any] = tensor_tree_map(select_chunk , prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
# Allocate space for the output
if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , output_chunk)
# Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict):
            def assign(d1: dict , d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict):
                        assign(v , d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out , output_chunk)
        elif isinstance(output_chunk , tuple):
            for x1, x2 in zip(out , output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk , torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , out)
return out
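# Minimal usage sketch for chunk_layer above (the layer and shapes are made up
# for illustration; any callable taking keyword tensors works):
#
#     def toy_layer(x: torch.Tensor) -> torch.Tensor:
#         return x + 1
#
#     x = torch.randn(4, 8, 16)  # the first two dims (4 x 8) are batch dims
#     out = chunk_layer(toy_layer, {"x": x}, chunk_size=8, no_batch_dims=2)
#     # out.shape == (4, 8, 16), same values as toy_layer(x), bounded peak memory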
class __lowercase :
    def __init__( self : List[str] , max_chunk_size : int = 512 , ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size( self : str , fn : Callable , args : tuple , min_chunk_size : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
candidates[-1] += 4
        def test_chunk_size(chunk_size : int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size)
return True
except RuntimeError:
return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self : Any , args1 : Iterable , args2 : Iterable):
        consistent = True
        for a1, a2 in zip(args1 , args2):
            assert type(a1) == type(a2)
            if isinstance(a1 , (list, tuple)):
                consistent &= self._compare_arg_caches(a1 , a2)
            elif isinstance(a1 , dict):
                a1_items = [v for _, v in sorted(a1.items() , key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items() , key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items , a2_items)
            else:
                consistent &= a1 == a2
return consistent
    def tune_chunk_size( self : List[Any] , representative_fn : Callable , args : tuple , min_chunk_size : int , ):
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , args , object)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data)
else:
# Otherwise, we can reuse the precomputed value
            consistent = False
if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
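# Sketch of how the tuner above is driven (the class is presumably named
# ChunkSizeTuner upstream; forward_fn and its arguments are hypothetical):
#
#     tuner = __lowercase(max_chunk_size=512)  # i.e. the class defined above
#     chunk_size = tuner.tune_chunk_size(forward_fn, (activations,), min_chunk_size=1)
#
# The search probes power-of-two candidates, keeps the largest one that runs
# without a RuntimeError (e.g. a CUDA OOM), and caches the result until the
# argument shapes change.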
| 6 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
    def tearDown( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed( self : int):
return 12
@property
    def num_embeds_ada_norm( self : Tuple):
return 12
@property
    def text_embedder_hidden_size( self : Dict):
return 32
@property
    def dummy_vqvae( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
    def dummy_text_encoder( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
    def dummy_transformer( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
        UpperCamelCase__ : Tuple = Transformer2DModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
    def tearDown( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 721 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
    def setUp( self : List[Any]):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
    def get_rust_tokenizer( self : Optional[int] , **UpperCAmelCase_ : str):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
    def get_image_processor( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
    def tearDown( self : str):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self : Tuple):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def __UpperCAmelCase ( list_of_ints) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
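# Example (a classic least-significant-digit radix sort, presumably named
# radix_sort upstream): sorting [170, 45, 75, 90, 2, 802, 2, 66] buckets by the
# ones, tens and hundreds digits in turn and returns
# [2, 2, 45, 66, 75, 90, 170, 802].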
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path , map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v , torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path)
if __name__ == "__main__":
fire.Fire(convert)
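# Example invocation via python-fire (the script and checkpoint names are
# hypothetical):
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path halves the checkpoint in place, overwriting src_path.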
| 6 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price( symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text , 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 701 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
    def __init__( self : Tuple , num_channels : Optional[Any]=3 , num_encoder_blocks : Optional[int]=4 , depths : Tuple=[2, 2, 2, 2] , sr_ratios : List[str]=[8, 4, 2, 1] , hidden_sizes : Union[str, Any]=[32, 64, 160, 256] , patch_sizes : Any=[7, 3, 3, 3] , strides : Any=[4, 2, 2, 2] , num_attention_heads : Union[str, Any]=[1, 2, 5, 8] , mlp_ratios : Tuple=[4, 4, 4, 4] , hidden_act : str="gelu" , hidden_dropout_prob : List[Any]=0.0 , attention_probs_dropout_prob : int=0.0 , classifier_dropout_prob : int=0.1 , initializer_range : List[str]=0.02 , drop_path_rate : Dict=0.1 , layer_norm_eps : Dict=1e-6 , decoder_hidden_size : int=256 , semantic_loss_ignore_index : Optional[int]=255 , **kwargs : Tuple , ):
        super().__init__(**kwargs)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage' , True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
| 6 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowercase (__lowerCamelCase , __lowerCamelCase ):
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self : Union[str, Any] , patch_size : List[str]=4 , num_channels : Optional[Any]=3 , embed_dim : Optional[int]=64 , depths : List[Any]=[3, 4, 6, 5] , num_heads : List[str]=[2, 4, 8, 16] , kernel_size : Optional[Any]=7 , mlp_ratio : List[str]=3.0 , qkv_bias : int=True , hidden_dropout_prob : Tuple=0.0 , attention_probs_dropout_prob : Optional[int]=0.0 , drop_path_rate : Dict=0.1 , hidden_act : Dict="gelu" , initializer_range : List[Any]=0.02 , layer_norm_eps : Union[str, Any]=1e-5 , layer_scale_init_value : str=0.0 , out_features : Optional[int]=None , out_indices : Optional[Any]=None , **kwargs : Any , ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
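# Sizing note for the config above: with the defaults embed_dim=64 and
# depths=[3, 4, 6, 5] (so len(depths) == 4), the final-stage width is
# hidden_size = int(64 * 2 ** 3) = 512.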
| 702 |
'''simple docstring'''
def __UpperCAmelCase ( sentence: str , ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
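# Example: a window of size 3 over 'abcde' yields ['abc', 'bcd', 'cde'],
# since len('abcde') - 3 + 1 == 3 windows.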
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model) -> List[Any]:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
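# Example invocation (the script name and paths are hypothetical):
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path model.ckpt \
#         --config_file config.json \
#         --pytorch_dump_path pytorch_model.bin
# Add --base_model to export the encoder-only variant without the decoder.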
| 703 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector: ndarray) -> float:
    return np.dot(vector , vector)
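# e.g. norm_squared(np.asarray([3.0, 4.0])) == 25.0, the squared Euclidean
# norm, computed as the dot product of the vector with itself.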
class __lowercase :
    def __init__( self : Tuple , *,
    regularization : float = np.inf , kernel : str = "linear" , gamma : float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
            msg = F'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear( self : Any , vector1 : ndarray , vector2 : ndarray):
        return np.dot(vector1 , vector2)
    def __rbf( self : Union[str, Any] , vector1 : ndarray , vector2 : ndarray):
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit( self : Any , observations : list[ndarray] , classes : ndarray):
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes , 0 , 0)
        bnds = Bounds(0 , self.regularization)
        l_star = minimize(
            to_minimize , np.ones(n) , bounds=bnds , constraints=[ly_contraint]).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j])
        self.offset = s / n
    def predict( self : str , observation : ndarray):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation)
            for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
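# Usage sketch for the classifier above (SVC is the presumed upstream class
# name; the toy data is illustrative):
#
#     svc = __lowercase(kernel='rbf', gamma=0.1)  # i.e. the class defined above
#     svc.fit([np.array([1.0, 1.0]), np.array([-1.0, -1.0])], np.array([1, -1]))
#     svc.predict(np.array([2.0, 2.0]))           # -> 1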
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration) -> Optional[int]:
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model( model , input , target , accelerator , do_backward=True) -> Dict:
    model.train()
    output = model(input)
    loss = F.mse_loss(output , target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup( accelerator , sched=False) -> int:
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset , batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3)
        sched = LambdaLR(opt , lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
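# The tests below exercise accelerate's gradient accumulation contract. The core
# pattern, following accelerate's documented API, is (sketch):
#
#     with accelerator.no_sync(ddp_model):    # micro-batches: grads stay local
#         accelerator.backward(loss)
#     accelerator.backward(loss)              # final micro-batch: grads all-reduce
#
# accelerator.accumulate(model) wraps the same bookkeeping and decides per step
# whether to sync, based on gradient_accumulation_steps.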
def test_noop_sync( accelerator) -> List[str]:
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model , ddp_input , ddp_target , accelerator)
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync( accelerator) -> str:
# Test on distributed setup that context manager behaves properly
UpperCamelCase__ : int = get_training_setup(lowerCamelCase_)
# Use a single batch
UpperCamelCase__ : str = next(iter(lowerCamelCase_)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
UpperCamelCase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
UpperCamelCase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase_):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
else:
# Sync grads
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration)
UpperCamelCase__ : Any = ddp_input[torch.randperm(len(lowerCamelCase_))]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False) -> Dict:
UpperCamelCase__ : Optional[Any] = Accelerator(
split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ , gradient_accumulation_steps=2)
# Test that context manager behaves properly
UpperCamelCase__ : Tuple = get_training_setup(lowerCamelCase_)
for iteration, batch in enumerate(lowerCamelCase_):
UpperCamelCase__ : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
UpperCamelCase__ : int = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase_):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase_) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration)
UpperCamelCase__ : int = ddp_input[torch.randperm(len(lowerCamelCase_))]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False) -> Optional[int]:
UpperCamelCase__ : Dict = Accelerator(
split_batches=lowerCamelCase_ , dispatch_batches=lowerCamelCase_ , gradient_accumulation_steps=2)
# Test that context manager behaves properly
UpperCamelCase__ : List[str] = get_training_setup(lowerCamelCase_ , lowerCamelCase_)
for iteration, batch in enumerate(lowerCamelCase_):
UpperCamelCase__ : int = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
UpperCamelCase__ : Optional[int] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase_)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase_):
step_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
UpperCamelCase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase_))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration)
GradientState._reset_state()
def test_dataloader_break( ) -> Optional[Any]:
UpperCamelCase__ : List[Any] = Accelerator()
UpperCamelCase__ : Dict = RegressionDataset(length=80)
UpperCamelCase__ : List[Any] = DataLoader(lowerCamelCase_ , batch_size=16)
UpperCamelCase__ : int = RegressionDataset(length=96)
UpperCamelCase__ : str = DataLoader(lowerCamelCase_ , batch_size=16)
UpperCamelCase__ : List[Any] = accelerator.prepare(lowerCamelCase_ , lowerCamelCase_)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase_):
assert id(accelerator.gradient_state.active_dataloader) == id(lowerCamelCase_)
if iteration < len(lowerCamelCase_) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase_):
assert id(accelerator.gradient_state.active_dataloader) == id(lowerCamelCase_)
if batch_num < len(lowerCamelCase_) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main( ) -> Optional[int]:
UpperCamelCase__ : str = Accelerator()
UpperCamelCase__ : int = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**')
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**')
test_noop_sync(lowerCamelCase_)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**')
test_distributed_sync(lowerCamelCase_)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowerCamelCase_ , lowerCamelCase_)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0') or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase_ , lowerCamelCase_)
def _mp_fn( index) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 704 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_( state_dict) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def rename_key( name) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
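        # e.g. "refinenet4" maps to "fusion_stage.layers.0" (abs(4 - 4) == 0)
        # and "refinenet1" maps to "fusion_stage.layers.3" (abs(1 - 4) == 3)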
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def read_in_q_k_v( state_dict , config) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
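        # Shape sketch of the slicing above (illustrative value, assuming hidden_size = 4):
        # the fused qkv weight has shape (3 * 4, 4) = (12, 4); rows 0:4 become the query
        # projection, rows 4:8 the key projection and rows -4: the value projection.
        # The fused bias of length 12 splits the same way.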
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
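# Example invocation (the script filename is assumed; the URL is the argparse default above):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large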
| 6 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase__ = 'src/diffusers'
lowerCAmelCase__ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCAmelCase__ = spec.loader.load_module()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
return line.startswith(lowerCamelCase_) or len(lowerCamelCase_) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , lowerCamelCase_) is not None
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Dict = object_name.split('.')
UpperCamelCase__ : Any = 0
# First let's find the module where our object lives.
UpperCamelCase__ : str = parts[i]
while i < len(lowerCamelCase_) and not os.path.isfile(os.path.join(lowerCamelCase_ , f'{module}.py')):
i += 1
if i < len(lowerCamelCase_):
UpperCamelCase__ : Optional[int] = os.path.join(lowerCamelCase_ , parts[i])
if i >= len(lowerCamelCase_):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.')
with open(os.path.join(lowerCamelCase_ , f'{module}.py') , 'r' , encoding='utf-8' , newline='\n') as f:
UpperCamelCase__ : List[str] = f.readlines()
# Now let's find the class / func in the code!
UpperCamelCase__ : int = ''
UpperCamelCase__ : Optional[int] = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCamelCase_) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCamelCase_):
raise ValueError(f' {object_name} does not match any function or class in {module}.')
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCamelCase__ : Tuple = line_index
while line_index < len(lowerCamelCase_) and _should_continue(lines[line_index] , lowerCamelCase_):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
UpperCamelCase__ : List[str] = lines[start_index:line_index]
return "".join(lowerCamelCase_)
lowerCAmelCase__ = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
lowerCAmelCase__ = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
lowerCAmelCase__ = re.compile(R'<FILL\s+[^>]*>')
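# Example of a marker line matched by _re_copy_warning above (illustrative):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->Block
# Its groups are the leading indent, the dotted object path relative to `diffusers`,
# and the trailing text that may carry an 'Old->New' replace pattern for _re_replace_pattern.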
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase__ : Dict = code.split('\n')
UpperCamelCase__ : List[Any] = 0
while idx < len(lowerCamelCase_) and len(lines[idx]) == 0:
idx += 1
if idx < len(lowerCamelCase_):
return re.search(R'^(\s*)\S' , lines[idx]).groups()[0]
return ""
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : Dict = len(get_indent(lowerCamelCase_)) > 0
if has_indent:
UpperCamelCase__ : Dict = f'class Bla:\n{code}'
UpperCamelCase__ : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCamelCase_)
UpperCamelCase__ : Tuple = black.format_str(lowerCamelCase_ , mode=lowerCamelCase_)
UpperCamelCase__ : Any = style_docstrings_in_code(lowerCamelCase_)
return result[len('class Bla:\n') :] if has_indent else result
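# Behavioural sketch of blackify (illustrative input): an indented snippet such as
# '    x=1' is wrapped as 'class Bla:\n    x=1' so black can parse it, reformatted at
# line length 119, then returned with the synthetic 'class Bla:\n' header stripped.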
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=False) -> Dict:
with open(lowerCamelCase_ , 'r' , encoding='utf-8' , newline='\n') as f:
UpperCamelCase__ : Optional[int] = f.readlines()
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCamelCase_):
UpperCamelCase__ : Any = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
        UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = search.groups()
UpperCamelCase__ : Optional[Any] = find_code_in_diffusers(lowerCamelCase_)
UpperCamelCase__ : Dict = get_indent(lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCamelCase__ : Optional[int] = theoretical_indent
UpperCamelCase__ : Tuple = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCamelCase__ : List[Any] = True
while line_index < len(lowerCamelCase_) and should_continue:
line_index += 1
if line_index >= len(lowerCamelCase_):
break
UpperCamelCase__ : Dict = lines[line_index]
UpperCamelCase__ : List[str] = _should_continue(lowerCamelCase_ , lowerCamelCase_) and re.search(f'^{indent}# End copy' , lowerCamelCase_) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
UpperCamelCase__ : str = lines[start_index:line_index]
UpperCamelCase__ : Dict = ''.join(lowerCamelCase_)
# Remove any nested `Copied from` comments to avoid circular copies
UpperCamelCase__ : List[Any] = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(lowerCamelCase_) is None]
UpperCamelCase__ : Dict = '\n'.join(lowerCamelCase_)
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCamelCase_) > 0:
UpperCamelCase__ : List[str] = replace_pattern.replace('with' , '').split(',')
UpperCamelCase__ : Optional[int] = [_re_replace_pattern.search(lowerCamelCase_) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
                UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = pattern.groups()
UpperCamelCase__ : Optional[Any] = re.sub(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
if option.strip() == "all-casing":
UpperCamelCase__ : Union[str, Any] = re.sub(obja.lower() , obja.lower() , lowerCamelCase_)
UpperCamelCase__ : Tuple = re.sub(obja.upper() , obja.upper() , lowerCamelCase_)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCamelCase__ : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code)
UpperCamelCase__ : List[str] = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
UpperCamelCase__ : int = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCamelCase__ : str = start_index + 1
if overwrite and len(lowerCamelCase_) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.')
with open(lowerCamelCase_ , 'w' , encoding='utf-8' , newline='\n') as f:
f.writelines(lowerCamelCase_)
return diffs
def __UpperCAmelCase ( lowerCamelCase_ = False) -> Dict:
UpperCamelCase__ : List[Any] = glob.glob(os.path.join(lowerCamelCase_ , '**/*.py') , recursive=lowerCamelCase_)
UpperCamelCase__ : Tuple = []
for filename in all_files:
UpperCamelCase__ : int = is_copy_consistent(lowerCamelCase_ , lowerCamelCase_)
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCamelCase_) > 0:
UpperCamelCase__ : int = '\n'.join(lowerCamelCase_)
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
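# Typical invocations (per the error message above):
#   python utils/check_copies.py                      # report out-of-sync copies and raise
#   python utils/check_copies.py --fix_and_overwrite  # rewrite the copies in place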
| 705 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
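        # Worked example with the defaults above: image_size=30 and patch_size=2 give
        # (30 // 2) ** 2 = 225 patches; with mask_ratio=0.6 the expected sequence length
        # is ceil(0.4 * (225 + 1)) = 91 visible tokens (the +1 is the [CLS] token).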
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''microsoft/speecht5_tts'''
_lowerCamelCase = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
_lowerCamelCase = '''text_reader'''
_lowerCamelCase = SpeechTaProcessor
_lowerCamelCase = SpeechTaForTextToSpeech
_lowerCamelCase = SpeechTaHifiGan
_lowerCamelCase = ['''text''']
_lowerCamelCase = ['''audio''']
def __UpperCamelCase ( self : Union[str, Any]):
if self.post_processor is None:
UpperCamelCase__ : Optional[int] = 'microsoft/speecht5_hifigan'
super().setup()
def __UpperCamelCase ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=None):
UpperCamelCase__ : List[Any] = self.pre_processor(text=UpperCAmelCase_ , return_tensors='pt' , truncation=UpperCAmelCase_)
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')
UpperCamelCase__ : int = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation')
UpperCamelCase__ : Optional[int] = torch.tensor(embeddings_dataset[7_305]['xvector']).unsqueeze(0)
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
with torch.no_grad():
return self.model.generate_speech(**UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any]):
with torch.no_grad():
return self.post_processor(UpperCAmelCase_).cpu().detach()
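# Hedged usage sketch (in transformers this class ships as TextToSpeechTool; the class
# name is obfuscated here, so treat the identifier below as an assumption):
#   tool = TextToSpeechTool()
#   tool.setup()
#   waveform = tool('Hello, how are you today?')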
| 706 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
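# Note: this is the standard dummy-object pattern. Instantiating the class, or calling
# its classmethods, without torch and scipy installed raises a clear ImportError through
# requires_backends, keeping the public API importable when the backends are missing.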
| 6 | 0 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
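    # Illustrative call: a node with prefix 'banana' matched against the word 'bandanas'
    # shares three leading characters, so match() returns ('ban', 'ana', 'danas'): the
    # common prefix, the node's remaining prefix and the word's remaining suffix.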
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
            UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
            UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
            UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
    main()
| 707 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''CLIPImageProcessor'''
_lowerCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : int):
UpperCamelCase__ : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = kwargs.pop('feature_extractor')
UpperCamelCase__ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def __call__( self : List[str] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : Any):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.')
if text is not None:
UpperCamelCase__ : Union[str, Any] = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
if images is not None:
UpperCamelCase__ : Tuple = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
if text is not None and images is not None:
UpperCamelCase__ : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_)
def __UpperCamelCase ( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int]):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : str , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str]):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : str = self.tokenizer.model_input_names
UpperCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __UpperCamelCase ( self : int):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self : str):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCAmelCase_ , )
return self.image_processor
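# Hedged usage sketch (the checkpoint id is an assumption, not taken from this file):
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#   batch = processor(text=['a photo of a cat'], images=image, return_tensors='pt')
# Passing both text and images follows the branch above that attaches `pixel_values`
# to the tokenizer encoding before returning it.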
| 708 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
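    # With the values above, the dummy transformer runs on a 12 x 12 latent grid: a single
    # attention head of dimension height * width = 144 over self.num_embed vector
    # embeddings, which keeps these fast tests small.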
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 0 |