"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "openai/whisper-base"
__lowerCAmelCase = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__lowerCAmelCase = "transcriber"
__lowerCAmelCase = WhisperProcessor
__lowerCAmelCase = WhisperForConditionalGeneration
__lowerCAmelCase = ["audio"]
__lowerCAmelCase = ["text"]
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[Any]:
return self.pre_processor(__A , return_tensors='''pt''' ).input_features
def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple:
return self.model.generate(inputs=__A )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0] | 81 |
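# A minimal usage sketch for the tool above (an illustration, not part of the original file).
# Assumption: `SpeechToTextTool` is importable from the installed `transformers.tools`
# package, and `PipelineTool.__call__` lazily runs setup, encode, forward and decode.
# The silent audio array is only a placeholder input.
#
# import numpy as np
# from transformers.tools import SpeechToTextTool  # assumed import path
#
# tool = SpeechToTextTool()
# audio = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of 16 kHz silence
# print(tool(audio))
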
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        """Insert each value at the head, largest first, so the list ends up ascending."""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))

"""Convert ViT checkpoints trained with the DINO method."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak the original DINO weights into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    # patch size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)

import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")

"""Tokenizer tests for MGP-STR."""
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass

import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between `start_prompt` and `end_prompt`, returning it with its start/end line indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim surrounding blank lines.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the list of models supporting a given task, formatted as links into the model docs."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the model list in a task guide against the auto mappings, and optionally fix the guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)

"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__UpperCAmelCase = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__UpperCAmelCase = {
'allenai/longformer-base-4096': 40_96,
'allenai/longformer-large-4096': 40_96,
'allenai/longformer-large-4096-finetuned-triviaqa': 40_96,
'allenai/longformer-base-4096-extra.pos.embd.only': 40_96,
'allenai/longformer-large-4096-extra.pos.embd.only': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _snake_case ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
lowerCAmelCase_ :Any = bs[:]
lowerCAmelCase_ :Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase__ )
cs.append(2**8 + n )
n += 1
lowerCAmelCase_ :Any = [chr(lowercase__ ) for n in cs]
return dict(zip(lowercase__ , lowercase__ ) )
def _snake_case ( lowercase__ : Dict ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = set()
lowerCAmelCase_ :Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase_ :str = char
return pairs
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ :Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :int = ["input_ids", "attention_mask"]
def __init__( self , __A , __A , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , **__A , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
lowerCAmelCase_ :int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
lowerCAmelCase_ :int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
lowerCAmelCase_ :Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
lowerCAmelCase_ :Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
lowerCAmelCase_ :Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ :List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase_ :List[Any] = json.load(__A )
lowerCAmelCase_ :Any = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ :Any = errors # how to handle errors in decoding
lowerCAmelCase_ :Union[str, Any] = bytes_to_unicode()
lowerCAmelCase_ :Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase_ :List[Any] = merges_handle.read().split("""\n""" )[1:-1]
lowerCAmelCase_ :Any = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) )
lowerCAmelCase_ :Optional[int] = {}
lowerCAmelCase_ :Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ :str = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return len(self.encoder )
def __lowerCAmelCase ( self ) -> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self , __A ) -> Any:
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ :Dict = tuple(__A )
lowerCAmelCase_ :List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
lowerCAmelCase_ :List[str] = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ :Any = bigram
lowerCAmelCase_ :Optional[Any] = []
lowerCAmelCase_ :Tuple = 0
while i < len(__A ):
try:
lowerCAmelCase_ :Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ :Dict = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ :str = tuple(__A )
lowerCAmelCase_ :List[Any] = new_word
if len(__A ) == 1:
break
else:
lowerCAmelCase_ :List[Any] = get_pairs(__A )
lowerCAmelCase_ :List[str] = """ """.join(__A )
lowerCAmelCase_ :Dict = word
return word
def __lowerCAmelCase ( self , __A ) -> int:
lowerCAmelCase_ :Optional[Any] = []
for token in re.findall(self.pat , __A ):
lowerCAmelCase_ :Optional[int] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(""" """ ) )
return bpe_tokens
def __lowerCAmelCase ( self , __A ) -> Tuple:
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.decoder.get(__A )
def __lowerCAmelCase ( self , __A ) -> List[str]:
lowerCAmelCase_ :str = """""".join(__A )
lowerCAmelCase_ :List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :Any = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :str = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + """\n""" )
lowerCAmelCase_ :str = 0
with open(__A , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowerCAmelCase_ :List[Any] = token_index
writer.write(""" """.join(__A ) + """\n""" )
index += 1
return vocab_file, merge_file
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ :List[Any] = [self.cls_token_id]
lowerCAmelCase_ :Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :Optional[int] = [self.sep_token_id]
lowerCAmelCase_ :Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , __A , __A=False , **__A ) -> Tuple:
lowerCAmelCase_ :List[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
lowerCAmelCase_ :int = """ """ + text
return (text, kwargs)
| 84 |
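# A minimal usage sketch for the tokenizer above (an illustration, not part of the original
# file). It assumes a standard transformers installation where the class is exposed at the
# top level and the published checkpoint is reachable.
#
# from transformers import LongformerTokenizer
#
# tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
# encoded = tokenizer("Hello world")  # byte-level BPE ids wrapped in <s> ... </s>
# print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
# print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))
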
"""Convert slow tokenizer checkpoints to the fast serialization format of the `tokenizers` library."""
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)

"""Strassen's divide-and-conquer matrix multiplication."""
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Base case: multiply two 2x2 matrices directly."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square matrix with even dimensions into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices via Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    # Quadrants: a b / c d for matrix_a, e f / g h for matrix_b.
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))

import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tokenizer.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)

"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
"""XLNet configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )

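# A minimal usage sketch for the config above (an illustration, not part of the original
# file); the small hyperparameters are arbitrary and only show how the config drives the model.
#
# from transformers import XLNetConfig, XLNetModel
#
# config = XLNetConfig(d_model=256, n_layer=4, n_head=4, d_inner=1024)  # toy-sized
# model = XLNetModel(config)
# print(config.d_head)                   # 64, derived as d_model // n_head in __init__
# print(config.max_position_embeddings)  # -1: XLNet has no hard sequence length limit
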
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}

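# A minimal usage sketch for the task template above (an illustration, not part of the
# original file). It assumes the `datasets` library layout shown here; the dataset column
# names are hypothetical and only show how `column_mapping` remaps a dataset's own schema.
#
# from datasets.tasks import QuestionAnsweringExtractive  # assumed public import path
#
# task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
# print(task.column_mapping)  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}
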
"""Convert ViT MSN checkpoints from the original repository."""
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)

def equation(x: float) -> float:
    """The function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b] by repeated interval halving."""
    # The signs of f at the interval ends must differ (Bolzano's theorem).
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))

from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result

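# A minimal usage sketch for the pipeline above (an illustration, not part of the original
# file). It assumes a transformers build where this class is registered under the
# `zero-shot-image-classification` task; the CLIP checkpoint and COCO image URL are illustrative.
#
# from transformers import pipeline
#
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# predictions = classifier(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["two cats", "a dog", "an airplane"],
# )
# print(predictions)  # [{"score": ..., "label": ...}, ...] sorted by descending score
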
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by exchanging any pair found out of order."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))

import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowercase__ ( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F"{token} {vocab_tokens[token]}\n" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 340 | 0 |
import math
def main( ) -> None:
    """simple docstring"""
    message = input('Enter message: ' )
    key = int(input(F"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input('Encryption/Decryption [e/d]: ' )
    if mode.lower().startswith('e' ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"""Output:\n{text + '|'}""" )
def encrypt_message( key : int , message : str ) -> str:
    """simple docstring"""
    cipher_text = [''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message( key : int , message : str ) -> str:
    """simple docstring"""
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 90 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-german-cased''': (
            '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , )-> List[str]:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None )-> List[str]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 340 | 0 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser ):
    """simple docstring"""
    group = parser.add_argument_group('''quant_trainer arguments''' )
    group.add_argument('''--wprec''' , type=int , default=8 , help='''weight precision''' )
    group.add_argument('''--aprec''' , type=int , default=8 , help='''activation precision''' )
    group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
    group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
    group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
    group.add_argument('''--quant-disable-keyword''' , type=str , nargs='''+''' , help='''disable quantizers by keyword''' )
    group.add_argument('''--quant-disable-layer-module''' , type=str , help='''disable quantizers by keyword under layer.''' )
    group.add_argument('''--quant-enable-layer-module''' , type=str , help='''enable quantizers by keyword under layer''' )
    group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
    group.add_argument('''--percentile''' , default=None , type=float , help='''percentile for PercentileCalibrator''' )
    group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
    group.add_argument('''--clip-gelu''' , metavar='''N''' , type=float , help='''clip gelu output maximum value to N''' )
    group.add_argument(
        '''--recalibrate-weights''' , action='''store_true''' , help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ) , )
def set_default_quantizers(args ):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
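# Illustrative driver sketch (hypothetical wiring, not part of the original module):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--calibrator", "percentile", "--percentile", "99.99"])
#   set_default_quantizers(args)  # must run before the quantized model is instantiated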
def configure_model(model , args , calib=False , eval_during_training=False ):
    """simple docstring"""
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f'using quantization package {pytorch_quantization.__file__}' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
    if args.clip_gelu:
        clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration(model ):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def finish_calibration(model , args ):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv(model , args ):
    """simple docstring"""
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model , maxval ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def expand_amax(model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def recalibrate_weights(model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
            mod._weight_quantizer._amax = amax
def print_model_summary(model , name_width=25 , line_width=1_80 , ignore=None ):
    """simple docstring"""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        s = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f'{name:{name_width}} {act_str}' )
            logger.info(f'{" ":{name_width}} {wgt_str}' )
def print_quant_summary(model ):
    """simple docstring"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f'{name:80} {mod}' )
            count += 1
    print(f'{count} TensorQuantizers found in model' )
def set_quantizer(name , mod , quantizer , k , v ):
    """simple docstring"""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'{name} has no {quantizer}' )
def set_quantizers(name , mod , which="both" , **kwargs ):
    """simple docstring"""
    s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
    for k, v in kwargs.items():
        s += f' {k}={v}'
        if which in ["input", "both"]:
            set_quantizer(name , mod , '''_input_quantizer''' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )
def set_quantizer_by_name(model , names , **kwargs ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod , k , v )
                    logger.info(s )
| 91 |
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
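# Illustrative usage sketch (IIRFilter.process is assumed from audio_filters.iir_filter):
#   filt = make_lowpass(1000, 48000)  # 2nd-order low-pass biquad with 1 kHz cutoff at 48 kHz
#   filtered = [filt.process(sample) for sample in samples]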
def make_highpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
| 92 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend( line ):
    """simple docstring"""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( init_file ):
    """simple docstring"""
    with open(init_file , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
    """simple docstring"""
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else F"{key} backend"
            errors.append(F"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"  {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"  {a} in _import_structure but not in TYPE_HINT." )
    return errors
def check_all_inits( ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules( ):
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def check_submodules( ):
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340 | 0 |
'''simple docstring'''
class Things:
    def __init__( self , name , value , weight ):
        """simple docstring"""
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        """simple docstring"""
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value( self ):
        """simple docstring"""
        return self.value
    def get_name( self ):
        """simple docstring"""
        return self.name
    def get_weight( self ):
        """simple docstring"""
        return self.weight
    def value_weight( self ):
        """simple docstring"""
        return self.value / self.weight
def build_menu( name , value , weight ):
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( item , max_cost , key_func ):
    """simple docstring"""
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
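# Illustrative usage sketch (example values, not part of the original module):
#   foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 40])
#   taken, total_value = greedy(foods, 60, Things.get_value)  # pick by value under a 60-unit budget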
def test_greedy( ):
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__( self , vertices , edges )-> None:
        '''simple docstring'''
        self.vertices = vertices
        self.edges = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight )-> None:
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self )-> Graph:
        '''simple docstring'''
        subgraph = Graph({min(self.vertices )} , {} )
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution( filename : str = "p107_network.txt" ) -> int:
    """simple docstring"""
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    network_file = os.path.join(script_dir , filename )
    edges = {}
    with open(network_file ) as f:
        data = f.read().strip().split("\n" )
    adjaceny_matrix = [line.split("," ) for line in data]
    for edgea in range(1 , len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb] )
    graph = Graph(set(range(len(adjaceny_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
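# Illustrative note: for the 40-vertex network shipped as p107_network.txt, the saving
# returned by solution() should match Project Euler 107's published answer (259679).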
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig( datasets.BuilderConfig ):
    batch_size: int = 1_0000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table( self , pa_table ):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'''{file_idx}_{batch_idx}''', self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                    raise
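# Illustrative usage sketch (this builder is normally driven through `datasets.load_dataset`):
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train.parquet"}, columns=["id", "text"])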
| 94 |
from collections import defaultdict
from math import gcd
def solution( limit : int = 1_500_000 ) -> int:
    """simple docstring"""
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
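# Illustrative checks (facts from the problem statement, not part of the original module):
#   12 is the smallest perimeter formed by exactly one integer right triangle: (3, 4, 5).
#   120 can be formed in three ways, so it does not count toward solution().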
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
from manim import *
class __lowerCAmelCase ( UpperCamelCase__):
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : List[Any] =Rectangle(height=0.5 , width=0.5 )
a__ : List[Any] =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a__ : int =[mem.copy() for i in range(6 )]
a__ : Optional[int] =[mem.copy() for i in range(6 )]
a__ : Any =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
a__ : str =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
a__ : str =VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
a__ : str =Text("CPU" , font_size=2_4 )
a__ : Union[str, Any] =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
a__ : Optional[Any] =[mem.copy() for i in range(4 )]
a__ : Any =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
a__ : Union[str, Any] =Text("GPU" , font_size=2_4 )
a__ : Optional[int] =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
a__ : Union[str, Any] =[mem.copy() for i in range(6 )]
a__ : Tuple =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
a__ : List[str] =Text("Model" , font_size=2_4 )
a__ : Any =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
a__ : Optional[Any] =[]
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ : Optional[Any] =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
cpu_targs.append(lowerCAmelCase__ )
a__ : List[Any] =[mem.copy() for i in range(6 )]
a__ : int =VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
a__ : Tuple =Text("Loaded Checkpoint" , font_size=2_4 )
a__ : int =Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , aligned_edge=lowerCAmelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
a__ : Any =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ : Union[str, Any] =MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[int] =MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
a__ : Any =MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) , Write(lowerCAmelCase__ ) )
self.play(Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
a__ : Optional[Any] =[]
a__ : Dict =[]
for i, rect in enumerate(lowerCAmelCase__ ):
a__ : Union[str, Any] =fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
first_animations.append(GrowFromCenter(lowerCAmelCase__ , run_time=1 ) )
a__ : Union[str, Any] =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(*lowerCAmelCase__ )
self.wait()
| 95 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """LayoutLMv2ImageProcessor"""
    tokenizer_class = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , )-> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        '''simple docstring'''
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 340 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path ):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f'''Unable to determine file format from file extension {path}. '''
        f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def run_command_factory(args ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand( BaseTransformersCLICommand ):
'''simple docstring'''
    def __init__( self , nlp , reader ):
        self._nlp = nlp
        self._reader = reader
@staticmethod
    def register_subcommand( parser ):
        run_parser = parser.add_parser('run' , help='Run a pipeline through the CLI' )
        run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
        run_parser.add_argument('--input' , type=str , help='Path to the file to use for inference' )
        run_parser.add_argument('--output' , type=str , help='Path to the file that will be used post to write results.' )
        run_parser.add_argument('--model' , type=str , help='Name or path to the model to instantiate.' )
        run_parser.add_argument('--config' , type=str , help='Name or path to the model\'s config to instantiate.' )
        run_parser.add_argument(
            '--tokenizer' , type=str , help='Name of the tokenizer to use. (default: same as the model name)' )
        run_parser.add_argument(
            '--column' , type=str , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
        run_parser.add_argument(
            '--format' , type=str , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
        run_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
        else:
            self._reader.save(outputs )
| 96 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , OwlViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , OwlViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , OwlViTImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 340 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def a ( __a ) -> datetime:
'''simple docstring'''
UpperCamelCase__ :Tuple = year % 19
UpperCamelCase__ :Optional[Any] = year % 4
UpperCamelCase__ :Dict = year % 7
UpperCamelCase__ :Union[str, Any] = math.floor(year / 100 )
UpperCamelCase__ :List[str] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
UpperCamelCase__ :List[str] = leap_day_inhibits / 4
UpperCamelCase__ :Union[str, Any] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCamelCase__ :List[str] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCamelCase__ :List[Any] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCamelCase__ :Optional[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
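    # Gauss's two exceptional cases: without them the formula could place
    # Easter on April 26 (or, in some years, April 25); those dates are
    # replaced by April 19 and April 18 respectively.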
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__a , 4 , 18 )
else:
return datetime(__a , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__snake_case = '''will be''' if year > datetime.now().year else '''was'''
print(F"""Easter in {year} {tense} {gauss_easter(year)}""") | 97 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
| 340 | 0 |
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase ) + 1
UpperCAmelCase__ = len(lowerCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase__ = [[0 for i in range(lowerCamelCase )] for j in range(lowerCamelCase )]
    # since a string of zero length matches a pattern of zero length
UpperCAmelCase__ = 1
    # since a pattern of zero length will never match a string of non-zero length
for i in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
    # a string of zero length still matches a pattern such as "a*b*", where
    # every literal is immediately followed by a '*'
for j in range(1 , lowerCamelCase ):
UpperCAmelCase__ = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , lowerCamelCase ):
for j in range(1 , lowerCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase__ = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase__ = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase__ = dp[i - 1][j]
else:
UpperCAmelCase__ = 0
else:
UpperCAmelCase__ = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowerCAmelCase__ : List[Any] = 'aab'
lowerCAmelCase__ : List[Any] = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 98 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _a ( UpperCamelCase_ : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
    if isinstance(UpperCamelCase_ , str ):
        raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(UpperCamelCase_ ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate(>10)." )
lowerCAmelCase__ = QuantumRegister(UpperCamelCase_ , "qr" )
lowerCAmelCase__ = ClassicalRegister(UpperCamelCase_ , "cr" )
lowerCAmelCase__ = QuantumCircuit(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = number_of_qubits
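    # Standard QFT construction: put a Hadamard on each qubit (most
    # significant first), then apply controlled-phase rotations of angle
    # pi / 2**k from every less-significant qubit; the swap loop below
    # reverses the qubit order, since the QFT emits bits in reverse.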
for i in range(UpperCamelCase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(UpperCamelCase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , UpperCamelCase_ , UpperCamelCase_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(UpperCamelCase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(UpperCamelCase_ , UpperCamelCase_ )
# simulate with 10000 shots
lowerCAmelCase__ = Aer.get_backend("qasm_simulator" )
lowerCAmelCase__ = execute(UpperCamelCase_ , UpperCamelCase_ , shots=10_000 )
return job.result().get_counts(UpperCamelCase_ )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 340 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example of how to use Accelerate,
# specifically showcasing how to properly calculate metrics on the
# validation dataset in a distributed system; it builds on the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
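# A hypothetical multi-GPU launch of this script (script name assumed):
# accelerate launch ./multi_process_metrics.py --mixed_precision fp16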
lowercase : str = 1_6
lowercase : List[Any] = 3_2
def A_ ( A__ , A__ = 16 ) -> List[str]:
a__ : int = AutoTokenizer.from_pretrained('bert-base-cased' )
a__ : Optional[int] = load_dataset('glue' , 'mrpc' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
a__ : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a__ : int = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels', which is the column name
    # the models of the transformers library expect
a__ : Optional[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a__ : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a__ : Any = 16
elif accelerator.mixed_precision != "no":
a__ : Dict = 8
else:
a__ : Union[str, Any] = None
return tokenizer.pad(
A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , )
# Instantiate dataloaders.
a__ : int = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
a__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def A_ ( A__ , A__ ) -> str:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , A__ ) == "1":
a__ : Tuple = 2
# Initialize accelerator
a__ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ : Union[str, Any] = config['lr']
a__ : List[Any] = int(config['num_epochs'] )
a__ : List[str] = int(config['seed'] )
a__ : Union[str, Any] = int(config['batch_size'] )
a__ : Tuple = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
a__ : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
a__ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(A__ )
a__ , a__ : Dict = get_dataloaders(A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ : int = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a__ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
a__ : Any = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
a__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ : Dict = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
a__ : Tuple = model(**A__ )
a__ : List[str] = outputs.loss
a__ : int = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
a__ : Any = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a__ : List[Any] = model(**A__ )
a__ : List[Any] = outputs.logits.argmax(dim=-1 )
a__ , a__ : Any = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(A__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
a__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a__ : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=A__ , references=A__ , )
a__ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , A__ )
def A_ ( ) -> List[str]:
a__ : Optional[int] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
a__ : Optional[Any] = parser.parse_args()
a__ : str = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 99 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
| 340 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = "cpu" , lowerCAmelCase__ = "openai/clip-vit-large-patch14"):
__SCREAMING_SNAKE_CASE = device
__SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
__SCREAMING_SNAKE_CASE = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
__SCREAMING_SNAKE_CASE = torchvision.transforms.Normalize(self.image_mean , self.image_std)
__SCREAMING_SNAKE_CASE = torchvision.transforms.Resize(2_2_4)
__SCREAMING_SNAKE_CASE = torchvision.transforms.CenterCrop(2_2_4)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.resize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.center_crop(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.normalize(lowerCAmelCase__)
return images
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.tokenizer(text=lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.preprocess_img(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__=1_0 , lowerCAmelCase__=0.01 , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__="image" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , ):
super().__init__()
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = device if device else get_device()
if vqgan:
__SCREAMING_SNAKE_CASE = vqgan
else:
__SCREAMING_SNAKE_CASE = load_vqgan(self.device , conf_path=lowerCAmelCase__ , ckpt_path=lowerCAmelCase__)
self.vqgan.eval()
if clip:
__SCREAMING_SNAKE_CASE = clip
else:
__SCREAMING_SNAKE_CASE = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""")
self.clip.to(self.device)
__SCREAMING_SNAKE_CASE = ProcessorGradientFlow(device=self.device)
__SCREAMING_SNAKE_CASE = iterations
__SCREAMING_SNAKE_CASE = lr
__SCREAMING_SNAKE_CASE = log
__SCREAMING_SNAKE_CASE = make_grid
__SCREAMING_SNAKE_CASE = return_val
__SCREAMING_SNAKE_CASE = quantize
__SCREAMING_SNAKE_CASE = self.vqgan.decoder.z_shape
def snake_case_ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=5 , lowerCAmelCase__=True):
__SCREAMING_SNAKE_CASE = []
if output_path is None:
__SCREAMING_SNAKE_CASE = """./animation.gif"""
if input_path is None:
__SCREAMING_SNAKE_CASE = self.save_path
__SCREAMING_SNAKE_CASE = sorted(glob(input_path + """/*"""))
if not len(lowerCAmelCase__):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""")
if len(lowerCAmelCase__) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""")
__SCREAMING_SNAKE_CASE = total_duration / len(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [frame_duration] * len(lowerCAmelCase__)
if extend_frames:
__SCREAMING_SNAKE_CASE = 1.5
__SCREAMING_SNAKE_CASE = 3
for file_name in paths:
if file_name.endswith(""".png"""):
images.append(imageio.imread(lowerCAmelCase__))
imageio.mimsave(lowerCAmelCase__ , lowerCAmelCase__ , duration=lowerCAmelCase__)
print(f"gif saved to {output_path}")
def snake_case_ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None):
if not (path or img):
raise ValueError("""Input either path or tensor""")
if img is not None:
raise NotImplementedError
__SCREAMING_SNAKE_CASE = preprocess(Image.open(lowerCAmelCase__) , target_image_size=2_5_6).to(self.device)
__SCREAMING_SNAKE_CASE = preprocess_vqgan(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,*__SCREAMING_SNAKE_CASE = self.vqgan.encode(lowerCAmelCase__)
return z
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.latent.detach().requires_grad_()
__SCREAMING_SNAKE_CASE = base_latent + transform_vector
if self.quantize:
__SCREAMING_SNAKE_CASE ,*__SCREAMING_SNAKE_CASE = self.vqgan.quantize(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = trans_latent
return self.vqgan.decode(lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None):
__SCREAMING_SNAKE_CASE = self.clip_preprocessor(text=lowerCAmelCase__ , images=lowerCAmelCase__ , return_tensors="""pt""" , padding=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.clip(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = clip_outputs.logits_per_image
if weights is not None:
__SCREAMING_SNAKE_CASE = similarity_logits * weights
return similarity_logits.sum()
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self._get_clip_similarity(pos_prompts["""prompts"""] , lowerCAmelCase__ , weights=(1 / pos_prompts["""weights"""]))
if neg_prompts:
__SCREAMING_SNAKE_CASE = self._get_clip_similarity(neg_prompts["""prompts"""] , lowerCAmelCase__ , weights=neg_prompts["""weights"""])
else:
__SCREAMING_SNAKE_CASE = torch.tensor([1] , device=self.device)
__SCREAMING_SNAKE_CASE = -torch.log(lowerCAmelCase__) + torch.log(lowerCAmelCase__)
return loss
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = torch.randn_like(self.latent , requires_grad=lowerCAmelCase__ , device=self.device)
__SCREAMING_SNAKE_CASE = torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
__SCREAMING_SNAKE_CASE = self._add_vector(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = loop_post_process(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self._get_CLIP_loss(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
print("""CLIP loss""" , lowerCAmelCase__)
if self.log:
wandb.log({"""CLIP Loss""": clip_loss})
clip_loss.backward(retain_graph=lowerCAmelCase__)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
wandb.init(reinit=lowerCAmelCase__ , project="""face-editor""")
wandb.config.update({"""Positive Prompts""": positive_prompts})
wandb.config.update({"""Negative Prompts""": negative_prompts})
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations})
if image_path:
__SCREAMING_SNAKE_CASE = Image.open(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = image.resize((2_5_6, 2_5_6))
wandb.log("""Original Image""" , wandb.Image(lowerCAmelCase__))
def snake_case_ ( self , lowerCAmelCase__):
if not prompts:
return []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = [prompt.strip() for prompt in prompts.split("""|""")]
for prompt in prompts:
if isinstance(lowerCAmelCase__ , (tuple, list)):
__SCREAMING_SNAKE_CASE = prompt[0]
__SCREAMING_SNAKE_CASE = float(prompt[1])
elif ":" in prompt:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = prompt.split(""":""")
__SCREAMING_SNAKE_CASE = float(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = prompt
__SCREAMING_SNAKE_CASE = 1.0
processed_prompts.append(lowerCAmelCase__)
weights.append(lowerCAmelCase__)
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase__ , device=self.device),
}
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , ):
if image_path:
__SCREAMING_SNAKE_CASE = self._get_latent(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
assert pos_prompts, "You must provide at least one positive prompt."
__SCREAMING_SNAKE_CASE = self.process_prompts(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.process_prompts(lowerCAmelCase__)
if save_final and save_path is None:
__SCREAMING_SNAKE_CASE = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""]))
if not os.path.exists(lowerCAmelCase__):
os.makedirs(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = save_path + """_""" + get_timestamp()
os.makedirs(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = save_path
__SCREAMING_SNAKE_CASE = self.vqgan.decode(self.latent)[0]
if show_intermediate:
print("""Original Image""")
show_pil(custom_to_pil(lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = loop_post_process(lowerCAmelCase__)
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)):
if show_intermediate:
show_pil(lowerCAmelCase__)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}.png"))
if self.log:
wandb.log({"""Image""": wandb.Image(lowerCAmelCase__)})
if show_final:
show_pil(lowerCAmelCase__)
if save_final:
transformed_img.save(os.path.join(self.save_path , f"iter_{iter:03d}_final.png"))
| 100 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCamelCase ( lowerCAmelCase__="ro" , lowerCAmelCase__="en" , lowerCAmelCase__="wmt16" , lowerCAmelCase__=None ):
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
lowercase = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
lowercase = datasets.load_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
if save_dir is None:
lowercase = f'{dataset}-{pair}'
lowercase = Path(lowerCAmelCase__ )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
lowercase = '''val''' if split == '''validation''' else split
lowercase = save_dir.joinpath(f'{fn}.source' )
lowercase = save_dir.joinpath(f'{fn}.target' )
lowercase = src_path.open('''w+''' )
lowercase = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
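    # Sample CLI usage via python-fire (script name assumed):
    # python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16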
| 101 |
from collections import defaultdict
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase_ )
if ret % 2 == 0:
cuts.append(UpperCamelCase_ )
return ret
def _a ( ) -> Optional[Any]:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
a_, a_ = 10, 9
a_ = defaultdict(list)
a_ = {}
a_ = []
a_ = 0
a_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 340 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger()
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =field(default_factory=__snake_case )
lowerCamelCase__ =field(default_factory=__snake_case )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : str = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__(self , a_ ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =1
lowerCamelCase__ =field(default_factory=__snake_case )
lowerCamelCase__ =field(default_factory=__snake_case )
lowerCamelCase__ =True
def __call__(self , a_ ):
'''simple docstring'''
__snake_case : int = Tracker(self.dest )(a_ ).parametrized
__snake_case : Optional[int] = Tracker(self.src )(a_ ).parametrized
__snake_case : Union[str, Any] = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
__snake_case : Dict = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(a_ )} operations while"""
f""" destination module has {len(a_ )}.""" )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
super().__init__()
__snake_case : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f"""Unexpected layer name {k}"""
__snake_case : Optional[int] = len(a_ ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
__snake_case : List[Any] = nn.ModuleDict(a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return get_trunk_forward_outputs(
a_ , out_feat_keys=a_ , feature_blocks=self._feature_blocks , )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = x.split('''-''' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self , a_ ):
'''simple docstring'''
if x not in self:
__snake_case : Tuple = self.convert_name_to_timm(a_ )
__snake_case : str = partial(lambda: (timm.create_model(a_ , pretrained=a_ ).eval(), None) )
else:
__snake_case : Optional[int] = super().__getitem__(a_ )
return val
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __getitem__(self , a_ ):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
__snake_case : List[Any] = RegNetModel
else:
__snake_case : List[str] = RegNetForImageClassification
return val
def lowercase ( _snake_case : Dict , _snake_case : List[str] , _snake_case : List[Tuple[str, str]] ) ->Optional[Any]:
"""simple docstring"""
for from_key, to_key in keys:
__snake_case : Union[str, Any] = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def lowercase ( _snake_case : str , _snake_case : Callable[[], nn.Module] , _snake_case : Callable[[], nn.Module] , _snake_case : RegNetConfig , _snake_case : Path , _snake_case : bool = True , ) ->Dict:
"""simple docstring"""
print(f"""Converting {name}...""" )
with torch.no_grad():
__snake_case , __snake_case : Optional[int] = from_model_func()
__snake_case : Tuple = our_model_func(_snake_case ).eval()
__snake_case : List[Any] = ModuleTransfer(src=_snake_case , dest=_snake_case , raise_if_mismatch=_snake_case )
__snake_case : Union[str, Any] = torch.randn((1, 3, 224, 224) )
module_transfer(_snake_case )
if from_state_dict is not None:
__snake_case : int = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__snake_case : Tuple = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
__snake_case : Union[str, Any] = manually_copy_vissl_head(_snake_case , our_model.state_dict() , _snake_case )
our_model.load_state_dict(_snake_case )
__snake_case : List[Any] = our_model(_snake_case , output_hidden_states=_snake_case )
__snake_case : Union[str, Any] = (
our_outputs.logits if isinstance(_snake_case , _snake_case ) else our_outputs.last_hidden_state
)
__snake_case : Optional[Any] = from_model(_snake_case )
__snake_case : Optional[Any] = from_output[-1] if type(_snake_case ) is list else from_output
    # now, since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__snake_case : str = our_outputs.hidden_states[-1]
assert torch.allclose(_snake_case , _snake_case ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_snake_case , )
__snake_case : Any = 224 if '''seer''' not in name else 384
# we can use the convnext one
__snake_case : Dict = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_snake_case )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_snake_case , )
print(f"""Pushed {name}""" )
def lowercase ( _snake_case : Path , _snake_case : str = None , _snake_case : bool = True ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = '''imagenet-1k-id2label.json'''
__snake_case : Optional[Any] = 1_000
__snake_case : int = (1, num_labels)
__snake_case : Optional[Any] = '''huggingface/label-files'''
__snake_case : Optional[int] = num_labels
__snake_case : str = json.load(open(cached_download(hf_hub_url(_snake_case , _snake_case , repo_type='''dataset''' ) ) , '''r''' ) )
__snake_case : str = {int(_snake_case ): v for k, v in idalabel.items()}
__snake_case : Union[str, Any] = idalabel
__snake_case : int = {v: k for k, v in idalabel.items()}
__snake_case : int = partial(_snake_case , num_labels=_snake_case , idalabel=_snake_case , labelaid=_snake_case )
__snake_case : int = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
__snake_case : int = NameToOurModelFuncMap()
__snake_case : List[str] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_snake_case : str , _snake_case : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
__snake_case : Tuple = torch.hub.load_state_dict_from_url(_snake_case , model_dir=str(_snake_case ) , map_location='''cpu''' )
__snake_case : Dict = model_func()
# check if we have a head, if yes add it
__snake_case : Any = files['''classy_state_dict''']['''base_model''']['''model''']
__snake_case : Tuple = model_state_dict['''trunk''']
model.load_state_dict(_snake_case )
return model.eval(), model_state_dict["heads"]
# pretrained
__snake_case : List[Any] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : str = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : int = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : Optional[Any] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__snake_case : Union[str, Any] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : List[str] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : Optional[int] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : Optional[int] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_snake_case , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _snake_case , _snake_case , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_snake_case , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _snake_case , _snake_case , _snake_case , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 102 |
import requests
from bsa import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
from bisect import bisect
from itertools import accumulate
def UpperCamelCase( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : str ):
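    # Greedy fractional knapsack: sort the items by value/weight ratio in
    # descending order, take whole items while capacity remains, then take a
    # fraction of the first item that no longer fits.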
lowerCAmelCase_ : Union[str, Any] = sorted(zip(__UpperCamelCase ,__UpperCamelCase ) ,key=lambda __UpperCamelCase : x[0] / x[1] ,reverse=__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Any = [i[0] for i in r], [i[1] for i in r]
lowerCAmelCase_ : Union[str, Any] = list(accumulate(__UpperCamelCase ) )
lowerCAmelCase_ : List[Any] = bisect(__UpperCamelCase ,__UpperCamelCase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
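    # Quick sanity check (classic textbook instance): with values [60, 100, 120],
    # weights [10, 20, 30], capacity 50 and n=3 the greedy ratio order takes the
    # first two items whole plus 20/30 of the third, for a total of 240.0.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))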
| 103 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so the list ends up sorted ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
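    # Expected output:
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10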
| 340 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config into a SwitchTransformersConfig.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
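    # Example invocation (script name and paths are illustrative placeholders):
    #   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
    #       --switch_t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --gin_file /path/to/model_config.gin \
    #       --pytorch_dump_folder_path ./switch-base-8 --num_experts 8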
| 104 |
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 340 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence length constraint. The parent class's test would fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 105 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between ``start_prompt`` and ``end_prompt`` in ``filename``."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim leading/trailing empty lines so only the generated block is returned.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting ``task_guide``, formatted as doc links."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the autogenerated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a top-left to bottom-right path (right/down moves only)."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
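    # Quick check: on [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest path is
    # 1 -> 3 -> 1 -> 1 -> 1, so the function returns 7.
    print(minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))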
| 106 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
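    # Example (class and checkpoint names are illustrative):
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers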
| 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
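# With the lazy module in place, an import such as
# `from transformers.models.speecht5 import SpeechT5Processor` resolves through
# _LazyModule, deferring the heavy torch/sentencepiece imports to first attribute access.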
| 107 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Given two of inductance (H), frequency (Hz) and inductive reactance (Ohms),
    solve for the third; pass 0 for the unknown quantity.
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
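    # e.g. a 10 mH inductor driven at 1 kHz: X_L = 2*pi*1000*0.01 ≈ 62.83 Ohms
    print(ind_reactance(0.01, 1000, 0))  # {'reactance': 62.83185307179586}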
| 108 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 109 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # The projection head is only used during self-supervised pre-training in MSN;
    # it is not needed for downstream tasks.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
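    # Example (script name is illustrative; the default --checkpoint_url converts the ViT-S/16 MSN weights):
    #   python convert_vit_msn_to_pytorch.py --pytorch_dump_folder_path ./vit-msn-small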
| 340 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))

pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 68 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 340 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 94 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 340 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 347 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
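
# Sketch of the layouts produced by the two helpers above (ids are illustrative,
# taken from the usual BERT-style vocabulary where [CLS]=101 and [SEP]=102):
#
#   single sequence: [CLS] A [SEP]           -> token_type_ids: 0 0 ... 0
#   pair:            [CLS] A [SEP] B [SEP]   -> token_type_ids: 0 ... 0 | 1 ... 1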
| 340 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
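
    # Shape contract exercised above, for a batch of B question hidden states:
    # retriever.retrieve(hidden_states, n_docs) returns retrieved_doc_embeds of shape
    # (B, n_docs, vector_size), doc_ids of shape (B, n_docs), and a list of B doc dicts.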
| 43 |
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 | 0 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCAmelCase = """\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_UpperCAmelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_UpperCAmelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
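

# A minimal, self-contained sketch of the per-sentence GLEU idea described in
# _DESCRIPTION above (clipped n-gram matches, then min(precision, recall)). It is
# an illustration only; the metric itself delegates to nltk's gleu_score.
from collections import Counter


def _ngram_counts(tokens, min_len=1, max_len=4):
    """Count all n-grams of order min_len..max_len in a token list."""
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts


def sentence_gleu_sketch(reference, hypothesis, min_len=1, max_len=4):
    ref = _ngram_counts(reference, min_len, max_len)
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    matches = sum((ref & hyp).values())  # Counter intersection clips each n-gram count
    precision = matches / max(sum(hyp.values()), 1)
    recall = matches / max(sum(ref.values()), 1)
    return min(precision, recall)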
| 173 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in its
    `_import_structure` and in its `TYPE_CHECKING` block.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the objects found in `_import_structure` with those in `TYPE_CHECKING`."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise if the two halves of an init disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Check every submodule is registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
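
    # Quick illustration (hand-checked against the regexes above):
    #   find_backend("if not is_torch_available():")  -> "torch"
    #   find_backend("import os")                     -> None
    # Several backends guarded on one line are joined with "_and_".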
| 340 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
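
# Illustrative behavior sketch: without `note_seq` installed, any use of this dummy
# raises an ImportError from requires_backends that points the user at the missing
# backend, e.g.:
#
#   MidiProcessor()  # -> ImportError mentioning that note_seq must be installed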
| 115 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree from the smallest vertex."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjaceny_matrix = [line.split(",") for line in data]
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])

    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
| 340 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch snowflake construction `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the polyline described by `vectors` with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
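
# Hand-checkable sanity example for rotate(): turning the unit x-vector by 90
# degrees yields (approximately) the unit y-vector.
#
#   numpy.allclose(rotate(numpy.array([1.0, 0.0]), 90), [0.0, 1.0])  # -> True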
| 279 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Count perimeters <= limit that admit exactly one right-angled integer triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
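
# Worked example of the parametrisation above: euclid_m=2, euclid_n=1 gives the
# primitive perimeter 2*2*(2+1) = 12, i.e. the (3, 4, 5) triangle; every multiple
# of 12 up to the limit is then counted once on behalf of this primitive triple.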
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowercase = KandinskyInpaintPipeline
lowercase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowercase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowercase = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase = False
@property
def __lowercase ( self : int ) -> int:
return 32
@property
def __lowercase ( self : int ) -> str:
return 32
@property
def __lowercase ( self : List[Any] ) -> int:
return self.time_input_dim
@property
def __lowercase ( self : List[Any] ) -> Any:
return self.time_input_dim * 4
@property
def __lowercase ( self : Tuple ) -> Any:
return 1_00
@property
def __lowercase ( self : Optional[int] ) -> Dict:
lowerCAmelCase_ : Any = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __lowercase ( self : List[str] ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
lowerCAmelCase_ : List[str] = MultilingualCLIP(__UpperCAmelCase )
lowerCAmelCase_ : Dict = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self : Dict ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCAmelCase_ : Dict = UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def __lowercase ( self : str ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self : Optional[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = self.dummy_text_encoder
lowerCAmelCase_ : List[Any] = self.dummy_tokenizer
lowerCAmelCase_ : Union[str, Any] = self.dummy_unet
lowerCAmelCase_ : Optional[Any] = self.dummy_movq
lowerCAmelCase_ : int = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__UpperCAmelCase , )
lowerCAmelCase_ : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __lowercase ( self : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[int]=0 ) -> List[Any]:
lowerCAmelCase_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase_ : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCAmelCase )
# create init_image
lowerCAmelCase_ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase_ : Optional[Any] = Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
        lowerCAmelCase_ : Dict = np.ones((64, 64) , dtype=np.float32 )
lowerCAmelCase_ : Dict = 0
if str(__UpperCAmelCase ).startswith("""mps""" ):
lowerCAmelCase_ : Optional[int] = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase_ : List[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase_ : str = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __lowercase ( self : Dict ) -> Dict:
lowerCAmelCase_ : Any = """cpu"""
lowerCAmelCase_ : List[str] = self.get_dummy_components()
lowerCAmelCase_ : Optional[int] = self.pipeline_class(**__UpperCAmelCase )
lowerCAmelCase_ : List[Any] = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase_ : List[str] = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
lowerCAmelCase_ : List[Any] = output.images
lowerCAmelCase_ : List[str] = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
lowerCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Dict = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def __lowercase ( self : int ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Any ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : int ) -> Optional[Any]:
lowerCAmelCase_ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowerCAmelCase_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        lowerCAmelCase_ : Optional[int] = np.ones((7_68, 7_68) , dtype=np.float32 )
lowerCAmelCase_ : Union[str, Any] = 0
lowerCAmelCase_ : int = """a hat"""
lowerCAmelCase_ : Any = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowerCAmelCase_ : Union[str, Any] = pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase_ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowerCAmelCase_ : int = pipeline(
__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
lowerCAmelCase_ : int = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 120 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """tokenizer"""]
a_ ="""LayoutLMv2ImageProcessor"""
a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ = features["words"]
lowerCAmelCase__ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
lowerCAmelCase__ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
lowerCAmelCase__ = images
return encoded_inputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" )
return images_with_overflow
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
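# --- Usage sketch (not part of the original module) ---
# Assumes the pretrained "microsoft/layoutxlm-base" checkpoint and a local RGB
# document image; with the default apply_ocr=True the image processor runs
# Tesseract itself, so words/boxes must not be passed in.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")  # placeholder path
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'image', 'input_ids']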
| 340 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
snake_case_ = logging.getLogger(__name__)
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[Any] ) -> Tuple:
if os.path.exists(UpperCamelCase_ ):
if os.path.exists(os.path.join(UpperCamelCase_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(UpperCamelCase_ , '''config.json''' ) ):
os.remove(os.path.join(UpperCamelCase_ , '''config.json''' ) )
if os.path.exists(os.path.join(UpperCamelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(UpperCamelCase_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(UpperCamelCase_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str]=False ) -> Any:
__snake_case = 2
if unlogit:
__snake_case = torch.pow(UpperCamelCase_ , UpperCamelCase_ )
__snake_case = p * torch.log(UpperCamelCase_ )
__snake_case = 0
return -plogp.sum(dim=-1 )
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Union[str, Any]:
logger.info('''lv, h >\t''' + '''\t'''.join(f"""{x + 1}""" for x in range(len(UpperCamelCase_ ) ) ) )
for row in range(len(UpperCamelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f"""layer {row + 1}:\t""" + '''\t'''.join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(f"""layer {row + 1}:\t""" + '''\t'''.join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : str , snake_case_ : int=True , snake_case_ : Tuple=True , snake_case_ : Optional[int]=None , snake_case_ : int=False ) -> Dict:
__snake_case , __snake_case = model.config.num_hidden_layers, model.config.num_attention_heads
__snake_case = torch.zeros(UpperCamelCase_ , UpperCamelCase_ ).to(args.device )
__snake_case = torch.zeros(UpperCamelCase_ , UpperCamelCase_ ).to(args.device )
if head_mask is None:
__snake_case = torch.ones(UpperCamelCase_ , UpperCamelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCamelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__snake_case = None
__snake_case = 0.0
__snake_case = 0.0
for step, inputs in enumerate(tqdm(UpperCamelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
__snake_case = tuple(t.to(args.device ) for t in inputs )
((__snake_case ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__snake_case = model(UpperCamelCase_ , labels=UpperCamelCase_ , head_mask=UpperCamelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__snake_case , __snake_case , __snake_case = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCamelCase_ ):
__snake_case = entropy(attn.detach() , UpperCamelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCamelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__snake_case = 2
__snake_case = torch.pow(torch.pow(UpperCamelCase_ , UpperCamelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(UpperCamelCase_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(UpperCamelCase_ )
logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : Union[str, Any] ) -> int:
__snake_case , __snake_case , __snake_case = compute_heads_importance(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , compute_entropy=UpperCamelCase_ )
__snake_case = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , UpperCamelCase_ , original_score * args.masking_threshold )
__snake_case = torch.ones_like(UpperCamelCase_ )
__snake_case = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__snake_case = original_score
while current_score >= original_score * args.masking_threshold:
__snake_case = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__snake_case = float('''Inf''' )
__snake_case = head_importance.view(-1 ).sort()[1]
if len(UpperCamelCase_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
__snake_case = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
__snake_case = new_head_mask.view(-1 )
__snake_case = 0.0
__snake_case = new_head_mask.view_as(UpperCamelCase_ )
__snake_case = new_head_mask.clone().detach()
print_ad_tensor(UpperCamelCase_ )
# Compute metric and head importance again
__snake_case , __snake_case , __snake_case = compute_heads_importance(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , compute_entropy=UpperCamelCase_ , head_mask=UpperCamelCase_ )
__snake_case = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , UpperCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(UpperCamelCase_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : int , snake_case_ : int ) -> List[str]:
__snake_case = datetime.now()
__snake_case , __snake_case , __snake_case = compute_heads_importance(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , compute_entropy=UpperCamelCase_ , compute_importance=UpperCamelCase_ , head_mask=UpperCamelCase_ )
__snake_case = 1 / loss
__snake_case = datetime.now() - before_time
__snake_case = sum(p.numel() for p in model.parameters() )
__snake_case = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__snake_case = [
v,
]
assert sum(len(UpperCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCamelCase_ )
__snake_case = sum(p.numel() for p in model.parameters() )
__snake_case = datetime.now()
__snake_case , __snake_case , __snake_case = compute_heads_importance(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , compute_entropy=UpperCamelCase_ , compute_importance=UpperCamelCase_ , head_mask=UpperCamelCase_ , actually_pruned=UpperCamelCase_ , )
__snake_case = 1 / loss
__snake_case = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , UpperCamelCase_ , UpperCamelCase_ , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , UpperCamelCase_ , UpperCamelCase_ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(UpperCamelCase_ , args.output_dir )
def lowerCamelCase__ ( ) -> List[Any]:
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=UpperCamelCase_ , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=UpperCamelCase_ , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=UpperCamelCase_ , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=UpperCamelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=UpperCamelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=UpperCamelCase_ , type=UpperCamelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=UpperCamelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=UpperCamelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=UpperCamelCase_ , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=UpperCamelCase_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=UpperCamelCase_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=UpperCamelCase_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=UpperCamelCase_ , default=42 )
parser.add_argument('''--local_rank''' , type=UpperCamelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=UpperCamelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=UpperCamelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
__snake_case = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__snake_case = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
__snake_case = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__snake_case = torch.device('''cuda''' , args.local_rank )
__snake_case = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__snake_case = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__snake_case = nn.parallel.DistributedDataParallel(
UpperCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase_ )
elif args.n_gpu > 1:
__snake_case = nn.DataParallel(UpperCamelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCamelCase_ )
torch.save(UpperCamelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , UpperCamelCase_ )
# Prepare dataset
__snake_case = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__snake_case = (torch.from_numpy(UpperCamelCase_ ),)
__snake_case = TensorDataset(*UpperCamelCase_ )
__snake_case = RandomSampler(UpperCamelCase_ )
__snake_case = DataLoader(UpperCamelCase_ , sampler=UpperCamelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__snake_case = mask_heads(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
prune_heads(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
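# --- Illustrative check (not in the original script) ---
# The entropy helper above computes -sum(p * log p) over the last dimension;
# a uniform attention row over 4 positions should therefore score log(4).
import torch

p = torch.full((1, 4), 0.25)
plogp = p * torch.log(p)
print(-plogp.sum(dim=-1))  # tensor([1.3863]) == log(4)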
| 24 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
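# --- Usage sketch mirroring the assertions above (not part of the test file) ---
# Assumes hub access to the "google/owlvit-base-patch32" checkpoint; text
# queries are padded to the model's maximum query length of 16 tokens.
from transformers import OwlViTProcessor

owlvit_processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
owlvit_inputs = owlvit_processor(text=["cat", "nasa badge"])
print(owlvit_inputs["input_ids"].shape)  # (2, 16)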
| 340 | 0 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def lowerCamelCase_ (UpperCamelCase__ : str = "AAPL" ):
_UpperCAmelCase : str = F'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
_UpperCAmelCase : int = BeautifulSoup(requests.get(UpperCamelCase_ ).text , '''html.parser''' )
_UpperCAmelCase : Union[str, Any] = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 263 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0, real-valued when possible."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_a = (-b + sqrt(delta)) / (2 * a)
    root_b = (-b - sqrt(delta)) / (2 * a)
    return (
        root_a.real if not root_a.imag else root_a,
        root_b.real if not root_b.imag else root_b,
    )


def main() -> None:
    solution_a, solution_b = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_a} and {solution_b}")


if __name__ == "__main__":
    main()
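# --- Worked examples (illustrative additions) ---
# x^2 - 3x + 2 = 0 factors as (x - 1)(x - 2), so both roots are real;
# x^2 + 4 = 0 has a negative discriminant, so complex roots come back.
print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0)
print(quadratic_roots(a=1, b=0, c=4))   # complex, e.g. 2j and -2j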
| 340 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    """XOR-decrypt ciphertext with a repeating key; None if any char is invalid."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every 3-letter lowercase key and keep decodings made of valid chars."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
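# --- Illustrative round-trip (not part of the solution) ---
# XOR is its own inverse, so encrypting with a repeating 3-letter key and
# feeding the result back through try_key recovers the plaintext.
demo_key = (ord("a"), ord("b"), ord("c"))
demo_cipher = [ord(ch) ^ k for ch, k in zip("an example.", cycle(demo_key))]
print(try_key(demo_key, demo_cipher))  # an example.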
| 157 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build a Quantum Fourier Transform circuit and sample it on a simulator."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
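# --- Illustrative expectation (assumes qiskit with the legacy Aer provider) ---
# The QFT maps |000> to an equal superposition, so all 8 bitstrings should
# appear with roughly 10_000 / 8 = 1250 counts each.
demo_counts = quantum_fourier_transform(3)
print(sorted(demo_counts))  # ['000', '001', '010', '011', '100', '101', '110', '111']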
| 340 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: PreTrainedTokenizer , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Optional[int] = None , ) -> Optional[int]:
'''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
A__ = list(ds[list(files.keys() )[0]].features.keys() )
A__ = features_name.pop(UpperCamelCase_ )
A__ = list(set(ds[list(files.keys() )[0]][label_name] ) )
A__ = {label: i for i, label in enumerate(UpperCamelCase_ )}
A__ = tokenizer.model_input_names
A__ = {}
if len(UpperCamelCase_ ) == 1:
for k in files.keys():
A__ = ds[k].map(
lambda SCREAMING_SNAKE_CASE_ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , padding="max_length" ) , batched=UpperCamelCase_ , )
elif len(UpperCamelCase_ ) == 2:
for k in files.keys():
A__ = ds[k].map(
lambda SCREAMING_SNAKE_CASE_ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , padding="max_length" , ) , batched=UpperCamelCase_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A__ = {k: v for k, v in ex.items() if k in input_names}
A__ = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A__ = {k: v for k, v in ex.items() if k in input_names}
A__ = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A__ = {k: v for k, v in ex.items() if k in input_names}
A__ = labelaid[ex[label_name]]
yield (d, label)
A__ = (
tf.data.Dataset.from_generator(
UpperCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A__ = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A__ = (
tf.data.Dataset.from_generator(
UpperCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A__ = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A__ = (
tf.data.Dataset.from_generator(
UpperCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A__ = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(metadata={'help': 'Which column contains the label'} )
__lowerCamelCase = field(default=_UpperCAmelCase , metadata={'help': 'The path of the training file'} )
__lowerCamelCase = field(default=_UpperCAmelCase , metadata={'help': 'The path of the development file'} )
__lowerCamelCase = field(default=_UpperCAmelCase , metadata={'help': 'The path of the test file'} )
__lowerCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(default=_UpperCAmelCase , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCamelCase = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def lowerCAmelCase__ ( ) -> str:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A__ , A__ , A__ , A__ = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase_ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase_ ) , labelaid=UpperCamelCase_ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
A__ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , )
def compute_metrics(SCREAMING_SNAKE_CASE_: EvalPrediction ) -> Dict:
A__ = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A__ = TFTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , compute_metrics=UpperCamelCase_ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(UpperCamelCase_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(UpperCamelCase_ )
return results
if __name__ == "__main__":
main()
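# --- Hypothetical input sketch (not part of the script) ---
# get_tfds() loads CSV files whose header row names the columns; with
# --label_column_id 0 the first column is treated as the label and the
# remaining columns as text. Column names here are placeholders.
import csv

with open("train.csv", "w", newline="") as f:
    csv.writer(f).writerows(
        [["label", "sentence"], ["positive", "a fine movie"], ["negative", "dull"]]
    )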
| 68 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
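# --- Usage sketch (not part of the original module) ---
# Mirrors the documented MGP-STR flow; assumes the "alibaba-damo/mgp-str-base"
# checkpoint and a cropped text image on disk (path is a placeholder).
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
pixel_values = processor(
    images=Image.open("text_crop.png").convert("RGB"), return_tensors="pt"
).pixel_values
outputs = model(pixel_values)
print(processor.batch_decode(outputs.logits)["generated_text"])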
| 340 | 0 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Backtracking search that fills the board one row (queen) at a time."""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row
    # of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is not another queen in the same column, because if
        # there is it means there is a collision in vertical. Then we apply the two
        # formulas we learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # variables respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
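# --- Quick check (illustrative) ---
# Classic solution counts: n=4 -> 2, n=5 -> 10, n=6 -> 4, n=8 -> 92.
demo_boards: list[list[str]] = []
depth_first_search([], [], [], demo_boards, 6)
print(len(demo_boards))  # 4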
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
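# --- Illustrative note on the lazy pattern (not part of the module) ---
# With _LazyModule, importing the package is cheap; the heavy PyTorch/TF
# modeling files only load on first attribute access. A rough way to observe
# it (assumes transformers with PyTorch installed):
import importlib
import time

start = time.perf_counter()
convbert = importlib.import_module("transformers.models.convbert")
print(f"package import: {time.perf_counter() - start:.3f}s")  # cheap
start = time.perf_counter()
_ = convbert.ConvBertModel  # first access pulls in modeling_convbert
print(f"first attribute access: {time.perf_counter() - start:.3f}s")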
| 340 | 0 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 347 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """On removing edges (1, 3) and (1, 6), we can get the desired result 2."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
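# --- Re-runnable variant (a sketch; the original keeps its state in globals) ---
def even_tree_cuts(edge_list: list) -> int:
    adj = defaultdict(list)
    for a, b in edge_list:
        adj[a].append(b)
        adj[b].append(a)
    seen = set()
    even_subtrees = 0

    def subtree_size(node: int) -> int:
        nonlocal even_subtrees
        seen.add(node)
        total = 1
        for nxt in adj[node]:
            if nxt not in seen:
                total += subtree_size(nxt)
        if total % 2 == 0:
            even_subtrees += 1
        return total

    subtree_size(1)
    return even_subtrees - 1  # the whole tree is even too; that is not a cut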
| 340 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis and the target output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: parameter_vector[0] + sum_i x_i * parameter_vector[i + 1]."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of error (times x_index when index >= 0) over the training examples."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
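# --- Worked arithmetic (illustrative) ---
# With the initial parameter_vector [2, 4, 1, 5], the hypothesis for the first
# training input (5, 2, 3) is 2 + 4*5 + 1*2 + 5*3 = 39 against the target 15;
# run_gradient_descent() shrinks exactly this kind of gap.
assert 2 + 4 * 5 + 1 * 2 + 5 * 3 == 39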
| 43 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
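# Quick numeric intuition for the two helpers above (illustrative):
#   sigmoid(np.array([0.0]))         -> [0.5]
#   softmax(np.array([[1.0, 1.0]]))  -> [[0.5, 0.5]]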
class a ( _UpperCAmelCase ):
UpperCamelCase : List[str] = 'sigmoid'
UpperCamelCase : Optional[Any] = 'softmax'
UpperCamelCase : Optional[Any] = 'none'
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n ' , )
class TextClassificationPipeline ( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ):
        '''simple docstring'''
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k , int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , UserWarning , )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply , str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        result = super().__call__(*args , **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0] , str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self , inputs , **tokenizer_kwargs ) -> Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        if isinstance(inputs , dict ):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
    def _forward( self , model_inputs ):
        '''simple docstring'''
        return self.model(**model_inputs )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ):
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
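# Usage sketch for this pipeline via the high-level factory. The checkpoint name
# is an assumption; any sequence-classification model works:
#
#     >>> from transformers import pipeline
#     >>> classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     >>> classifier("I love this movie!")
#     [{'label': 'POSITIVE', 'score': 0.99...}]
#     >>> classifier("I love this movie!", top_k=None)  # scores for every label, sorted descending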
| 173 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self , ints: Iterable[int] )-> None:
        '''simple docstring'''
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self )-> Iterator[int]:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self )-> int:
        '''simple docstring'''
        return sum(1 for _ in self )
    def __str__( self )-> str:
        '''simple docstring'''
        return " -> ".join([str(node ) for node in self] )
def merge_lists ( sll_one : SortedLinkedList , sll_two : SortedLinkedList ) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
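# Note: merge_lists re-sorts the concatenated values, costing O((m + n) log(m + n)).
# Since both inputs are already sorted, a linear merge is possible; a minimal sketch
# with the standard library (illustrative, not part of the original):
import heapq

def merge_lists_linear(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> list[int]:
    # heapq.merge performs the classic O(m + n) two-way merge lazily.
    return list(heapq.merge(sll_one, sll_two))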
| 340 | 0 |
"""simple docstring"""
def lowerCamelCase ( num : int ) -> bool:
    '''simple docstring'''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
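# Quick sanity checks for the digit-reversal test above (illustrative calls, not
# part of the original file):
assert lowerCamelCase(121)       # 121 reversed is still 121
assert not lowerCamelCase(123)   # 123 reversed is 321
assert not lowerCamelCase(-121)  # negatives are rejected up front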
| 115 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 0 |
import os
from math import log10
def solution ( data_file : str = "base_exp.txt" ) -> int:
    """simple docstring"""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a, x = list(map(int , line.split(''',''') ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
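# The log10 trick above compares base**exp values without computing them: since
# log10 is monotonic, a**b > c**d exactly when b*log10(a) > d*log10(c).
# A tiny worked check (numbers are illustrative):
assert (11 * log10(2) < 7 * log10(3)) == (2**11 < 3**7)  # 2048 < 2187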
| 279 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = '''src/transformers'''
a_ = '''docs/source/en/tasks'''
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
# Find the start prompt.
lowerCAmelCase__ = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase__ = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() )
lowerCAmelCase__ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 0 |
'''simple docstring'''
import datasets
__A : List[str] = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
__A : int = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
__A : Optional[int] = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n"
def simple_accuracy ( preds , labels ):
    '''simple docstring'''
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class Xnli ( datasets.Metric):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute( self , predictions , references ):
        return {"accuracy": simple_accuracy(predictions , references )}
| 120 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast ( tokenizer_name , checkpoint_name , dump_path , force_download ):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
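# Example invocation (the script filename is an assumption; --tokenizer_name must
# be a key of SLOW_TO_FAST_CONVERTERS, e.g. "BertTokenizer"):
#
#     python convert_slow_tokenizers_checkpoints_to_fast.py \
#         --tokenizer_name BertTokenizer \
#         --checkpoint_name bert-base-uncased \
#         --dump_path ./fast_tokenizers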
| 340 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits ( x , bits=BITS ):
    """Convert a tensor with values in [0, 1] to bit planes in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b c h w -> b c 1 h w''' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , '''b c d h w -> b (c d) h w''' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal ( x , bits=BITS ):
    """Convert bit planes in {-1, 1} back to a tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b (c d) h w -> b c d h w''' , d=8 )
    dec = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
    return (dec / 255).clamp(0.0 , 1.0 )
def ddim_bit_scheduler_step ( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , eta: float = 0.0 , use_clipped_model_output: bool = True , generator=None , return_dict: bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else '''cpu'''
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step ( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prediction_type="epsilon" , generator=None , return_dict: bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion ( DiffusionPipeline ):
    def __init__(self : Any , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ):
        """simple docstring"""
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self : Optional[int] , height: Optional[int] = 256 , width: Optional[int] = 256 , num_inference_steps: Optional[int] = 50 , generator: Optional[torch.Generator] = None , batch_size: Optional[int] = 1 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ):
        """simple docstring"""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
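# Round-trip sketch for the bit encoding this pipeline diffuses over: a tensor in
# [0, 1] is quantised to 8 bits, mapped to {-1, 1} bit planes, and recovered
# exactly up to that quantisation. Shapes below are illustrative.
if __name__ == "__main__":
    sample = torch.rand(1, 3, 8, 8)    # (batch, channels, height, width)
    bits = decimal_to_bits(sample)     # (1, 3 * BITS, 8, 8), entries in {-1.0, 1.0}
    recovered = bits_to_decimal(bits)  # back to (1, 3, 8, 8)
    assert torch.allclose(recovered, (sample * 255).int().clamp(0, 255) / 255)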
| 24 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file ( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 340 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter (formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            F'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                F'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter (unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias (format_type: Optional[str] ):
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter (format_type: Optional[str] , **format_kwargs ):
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            F'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
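# Resolution sketch for the registry above (illustrative calls):
#
#     >>> get_formatter("np")       # alias "np" resolves to "numpy" -> NumpyFormatter()
#     >>> get_formatter("torch")    # TorchFormatter(), or raises the error stored at import time
#     >>> get_formatter("unknown")  # ValueError listing the registered format types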
| 263 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
    '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
    '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig ( PretrainedConfig ):
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""", # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-1_2 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool ( PipelineTool ):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors="pt" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
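# Usage sketch through the agents API (the "text-to-speech" tool id is an
# assumption about how this tool is registered; the default speaker embedding is
# row 7305 of Matthijs/cmu-arctic-xvectors, as wired up in encode above):
#
#     >>> from transformers import load_tool
#     >>> reader = load_tool("text-to-speech")
#     >>> audio = reader("Hello from SpeechT5")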
| 157 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys ( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head ( state_dict ):
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
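# Example invocation (the script filename is an assumption; the URL is the default
# declared above):
#
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small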
| 340 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bert2bert( self ):
        '''simple docstring'''
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
# start training
trainer.train()
| 68 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class ZeroShotImageClassificationPipeline ( Pipeline ):
    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
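# Usage sketch via the high-level factory (the checkpoint name is an assumption;
# any CLIP-style zero-shot model works):
#
#     >>> from transformers import pipeline
#     >>> classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     >>> classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")
#     [{'score': 0.99..., 'label': 'cat'}, {'score': 0.0..., 'label': 'dog'}]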
| 340 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup ( params , i , prefix ):
"""simple docstring"""
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def tax_attention_lookup ( params , i , prefix , layer_name="attention" ):
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup ( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def tax_layer_norm_lookup ( params , i , prefix , layer_name ):
"""simple docstring"""
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_tax_to_pytorch ( variables: dict , *, num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
"""simple docstring"""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase_ ):
# Block i, layer 0 (Self Attention).
a :Optional[int] = tax_layer_norm_lookup(UpperCamelCase_ , UpperCamelCase_ , '''encoder''' , '''pre_attention_layer_norm''' )
a , a , a , a :Union[str, Any] = tax_attention_lookup(UpperCamelCase_ , UpperCamelCase_ , '''encoder''' , '''attention''' )
a :int = layer_norm
a :int = k.T
a :int = o.T
a :Any = q.T
a :Optional[int] = v.T
# Block i, layer 1 (MLP).
a :Union[str, Any] = tax_layer_norm_lookup(UpperCamelCase_ , UpperCamelCase_ , '''encoder''' , '''pre_mlp_layer_norm''' )
a , a :List[str] = tax_mlp_lookup(UpperCamelCase_ , UpperCamelCase_ , '''encoder''' , UpperCamelCase_ )
a :Tuple = layer_norm
if split_mlp_wi:
a :List[Any] = wi[0].T
a :Union[str, Any] = wi[1].T
else:
a :Tuple = wi.T
a :Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a :Dict = tax_relpos_bias_lookup(
UpperCamelCase_ , UpperCamelCase_ , '''encoder''' ).T
a :int = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
a :Any = tax_relpos_bias_lookup(
UpperCamelCase_ , 0 , '''encoder''' ).T
a :Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase_ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase_ ):
# Block i, layer 0 (Self Attention).
a :Any = tax_layer_norm_lookup(UpperCamelCase_ , UpperCamelCase_ , '''decoder''' , '''pre_self_attention_layer_norm''' )
a , a , a , a :Dict = tax_attention_lookup(UpperCamelCase_ , UpperCamelCase_ , '''decoder''' , '''self_attention''' )
a :List[str] = layer_norm
a :Union[str, Any] = k.T
a :Dict = o.T
a :Optional[Any] = q.T
a :int = v.T
# Block i, layer 1 (Cross Attention).
a :Tuple = tax_layer_norm_lookup(UpperCamelCase_ , UpperCamelCase_ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
a , a , a , a :List[Any] = tax_attention_lookup(UpperCamelCase_ , UpperCamelCase_ , '''decoder''' , '''encoder_decoder_attention''' )
a :Any = layer_norm
a :str = k.T
a :Tuple = o.T
a :str = q.T
a :Optional[int] = v.T
# Block i, layer 2 (MLP).
a :Optional[Any] = tax_layer_norm_lookup(UpperCamelCase_ , UpperCamelCase_ , '''decoder''' , '''pre_mlp_layer_norm''' )
a , a :str = tax_mlp_lookup(UpperCamelCase_ , UpperCamelCase_ , '''decoder''' , UpperCamelCase_ )
a :Any = layer_norm
if split_mlp_wi:
a :Optional[int] = wi[0].T
a :Optional[int] = wi[1].T
else:
a :Dict = wi.T
a :str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a :List[Any] = tax_relpos_bias_lookup(UpperCamelCase_ , UpperCamelCase_ , '''decoder''' ).T
a :str = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
a :Dict = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
snake_case : Optional[Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
snake_case : Optional[int] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
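
# --- Illustrative usage (a minimal sketch; the paths below are hypothetical placeholders) ---
# Assuming a local T5X checkpoint and a matching model config exist, the converter
# could be invoked like this:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --scalable_attention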
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
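
# A minimal sketch of how this test module could be run inside a transformers
# checkout (the file path below is an assumption about the repository layout):
#
#   python -m pytest tests/models/bartpho/test_tokenization_bartpho.py -v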
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    """Rename the original SAM state dict keys to the Hugging Face naming scheme."""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE : Optional[int] = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
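
# --- Illustrative usage (a minimal sketch; a CUDA-capable GPU is assumed, since the
# script moves the model to "cuda" before running its sanity checks) ---
#
#   python convert_sam_to_hf.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path /path/to/output  # hypothetical output directory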
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
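
# --- Illustrative usage (a minimal sketch; requires network access to the Hub) ---
# from transformers import DistilBertTokenizerFast
# tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# encoded = tokenizer("Hello world", return_tensors="pt")
# print(encoded["input_ids"].shape)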
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes the wrapped function return its own run time in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Generate `num_examples` random examples matching the given feature spec."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write `num_examples` random examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
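
# --- Illustrative usage (a minimal sketch; the feature spec and output path are assumptions) ---
# import datasets
# features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
# dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
# print(len(dataset))  # -> 10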
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CamemBERT model."""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
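
# --- Illustrative usage (a minimal sketch) ---
# config = CamembertConfig(num_hidden_layers=6)  # a smaller variant, purely for illustration
# print(config.hidden_size)  # -> 768, the default defined above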
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING`
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that every transformers submodule is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
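
# A minimal sketch of how this consistency checker is typically invoked from the
# repository root (the path is an assumption about the checkout layout):
#
#   python utils/check_inits.py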
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for the given year, following Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A class representing an undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def _a ( UpperCamelCase_ : str = "p107_network.txt" ) -> int:
"""simple docstring"""
lowerCAmelCase__ = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = {}
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
with open(UpperCamelCase_ ) as f:
lowerCAmelCase__ = f.read().strip().split("\n" )
lowerCAmelCase__ = [line.split("," ) for line in data]
for edgea in range(1 , len(UpperCamelCase_ ) ):
for edgea in range(UpperCamelCase_ ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCAmelCase__ = int(adjaceny_matrix[edgea][edgea] )
lowerCAmelCase__ = Graph(set(range(len(UpperCamelCase_ ) ) ) , UpperCamelCase_ )
lowerCAmelCase__ = graph.prims_algorithm()
lowerCAmelCase__ = sum(graph.edges.values() )
lowerCAmelCase__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Count the perimeters up to `limit` that are produced by exactly one integer-sided
    right triangle, enumerating primitive Pythagorean triples with Euclid's formula.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # Only coprime (m, n) of opposite parity yield primitive triples.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
"""Image/text processor class for CLIP."""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
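
# --- Illustrative usage (a minimal sketch; the model id is real but downloading it
# requires network access, and "cat.png" is a hypothetical local file) ---
# from transformers import CLIPProcessor
# from PIL import Image
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# image = Image.open("cat.png")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)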
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
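
# --- Illustrative usage (a minimal sketch; the model id is real but downloading it
# requires network access, and "document.png" is a hypothetical scanned document) ---
# from transformers import LayoutXLMProcessor
# from PIL import Image
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# image = Image.open("document.png").convert("RGB")
# encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor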
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series 1 + 1/2^p + 1/3^p + ... + 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
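
# A quick illustrative call (output verified by hand):
#   p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']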
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random pixel data."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
"""simple docstring"""
from ....utils import logging
_lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self , A , A=None , A=2_0_4_8 ) -> int:
_UpperCAmelCase : Any = config.__dict__
_UpperCAmelCase : List[Any] = modal_hidden_size
if num_labels:
_UpperCAmelCase : List[Any] = num_labels
| 263 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
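# --- Editor's example: a self-contained sanity check of the quadratic formula used
# above (local names in this file are obfuscated, so the roots are re-derived
# directly with the cmath.sqrt already imported rather than by calling the function).
check_a, check_b, check_c = 5, 6, 1
check_delta = check_b * check_b - 4 * check_a * check_c  # discriminant = 16
check_roots = (
    (-check_b + sqrt(check_delta)) / (2 * check_a),
    (-check_b - sqrt(check_delta)) / (2 * check_a),
)
print(check_roots)  # ((-0.2+0j), (-1+0j)) -- both satisfy 5x^2 + 6x + 1 = 0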
| 340 | 0 |
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : Dict = len(UpperCamelCase_ )
__UpperCAmelCase : Tuple = len(matrix[0] )
__UpperCAmelCase : List[Any] = min(UpperCamelCase_, UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1, UpperCamelCase_ ):
__UpperCAmelCase : Optional[int] = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_, UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__UpperCAmelCase : List[str] = True
for i in range(row + 1, UpperCamelCase_ ):
if matrix[i][row] != 0:
__UpperCAmelCase , __UpperCAmelCase : Tuple = matrix[i], matrix[row]
__UpperCAmelCase : Tuple = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
__UpperCAmelCase : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
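# --- Editor's example: expected behaviour of the Gaussian-elimination rank routine
# above, cross-checked with numpy. The routine's public name is obfuscated in this
# file, so the direct call is shown as a comment with an assumed name.
import numpy as np

check_matrix = [[1.0, 2.0], [2.0, 4.0]]  # second row is 2x the first -> rank 1
print(int(np.linalg.matrix_rank(np.array(check_matrix))))  # 1
# rank_of_matrix(check_matrix) should also return 1 (name assumed)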
| 157 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _a ( UpperCamelCase_ : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError("number of qubits must be a integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(UpperCamelCase_ ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate(>10)." )
lowerCAmelCase__ = QuantumRegister(UpperCamelCase_ , "qr" )
lowerCAmelCase__ = ClassicalRegister(UpperCamelCase_ , "cr" )
lowerCAmelCase__ = QuantumCircuit(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = number_of_qubits
for i in range(UpperCamelCase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(UpperCamelCase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , UpperCamelCase_ , UpperCamelCase_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(UpperCamelCase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(UpperCamelCase_ , UpperCamelCase_ )
# simulate with 10000 shots
lowerCAmelCase__ = Aer.get_backend("qasm_simulator" )
lowerCAmelCase__ = execute(UpperCamelCase_ , UpperCamelCase_ , shots=10_000 )
return job.result().get_counts(UpperCamelCase_ )
if __name__ == "__main__":
    print(
        F"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
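# --- Editor's note: the QFT of the all-zeros input state is a uniform superposition,
# so with 10_000 shots each of the 2**n bitstrings should appear about shots / 2**n
# times (up to sampling noise). A quick expected-count calculation:
check_qubits, check_shots = 3, 10_000
print(check_shots / 2**check_qubits)  # 1250.0 expected counts per bitstring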
| 340 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowerCAmelCase__ = """Usage of script: script_name <size_of_canvas:int>"""
lowerCAmelCase__ = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> list[list[bool]]:
'''simple docstring'''
A__ = [[False for i in range(UpperCamelCase_ )] for j in range(UpperCamelCase_ )]
return canvas
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list[list[bool]] ) -> None:
'''simple docstring'''
for i, row in enumerate(UpperCamelCase_ ):
for j, _ in enumerate(UpperCamelCase_ ):
A__ = bool(random.getrandbits(1 ) )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list[list[bool]] ) -> list[list[bool]]:
'''simple docstring'''
A__ = np.array(UpperCamelCase_ )
A__ = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(UpperCamelCase_ ):
for c, pt in enumerate(UpperCamelCase_ ):
A__ = __judge_point(
UpperCamelCase_ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
A__ = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
A__ = current_canvas.tolist()
return return_canvas
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: bool , SCREAMING_SNAKE_CASE_: list[list[bool]] ) -> bool:
'''simple docstring'''
A__ = 0
A__ = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
A__ = pt
if pt:
if alive < 2:
A__ = False
elif alive == 2 or alive == 3:
A__ = True
elif alive > 3:
A__ = False
else:
if alive == 3:
A__ = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowerCAmelCase__ = int(sys.argv[1])
# main working structure of this module.
lowerCAmelCase__ = create_canvas(canvas_size)
seed(c)
lowerCAmelCase__ , lowerCAmelCase__ = plt.subplots()
fig.show()
lowerCAmelCase__ = ListedColormap(["""w""", """k"""])
try:
while True:
lowerCAmelCase__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
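# --- Editor's example: Conway's rules applied by the judging function above,
# demonstrated on the classic "blinker" oscillator without the matplotlib loop.
# A vertical line of three live cells becomes a horizontal line after one step.
def _editor_step(grid):
    n = len(grid)
    nxt = [[False] * n for _ in range(n)]
    for r in range(n):
        for col in range(n):
            # count live neighbours in the 3x3 window, excluding the cell itself
            alive = sum(
                grid[i][j]
                for i in range(max(0, r - 1), min(n, r + 2))
                for j in range(max(0, col - 1), min(n, col + 2))
                if (i, j) != (r, col)
            )
            nxt[r][col] = alive == 3 or (grid[r][col] and alive == 2)
    return nxt

_editor_blinker = [[False] * 5 for _ in range(5)]
for _r in (1, 2, 3):
    _editor_blinker[_r][2] = True  # vertical blinker in the middle column
_editor_next = _editor_step(_editor_blinker)
print([_editor_next[2][col] for col in (1, 2, 3)])  # [True, True, True]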
| 68 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
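# --- Editor's note: a hedged usage sketch for the scene-text processor defined
# above. The checkpoint name follows the public MGP-STR model card and is an
# assumption here; running this downloads model weights, so it is commented out.
#
# from PIL import Image
# from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=Image.open("word.png").convert("RGB"),
#                          return_tensors="pt").pixel_values
# outputs = model(pixel_values)
# print(processor.batch_decode(outputs.logits)["generated_text"])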
| 340 | 0 |
from __future__ import annotations
import math
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
snake_case : List[str] = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError('''n must be an integer''' )
if n <= 0:
        raise ValueError('''n must be > 0''' )
a :List[Any] = []
for num in range(len(UpperCamelCase_ ) ):
a :List[str] = 0
while 2 * i * i <= odd_composites[num]:
a :int = odd_composites[num] - 2 * i * i
if is_prime(UpperCamelCase_ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCamelCase_ ) == n:
return list_nums
return []
def __lowerCamelCase ( ):
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Dict = 10
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = 1
snake_case_ = max(UpperCamelCase_ )
while placement <= max_digit:
# declare and initialize empty buckets
snake_case_ = [[] for _ in range(UpperCamelCase_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
snake_case_ = int((i / placement) % RADIX )
buckets[tmp].append(UpperCamelCase_ )
        # put each bucket's contents back into list_of_ints
snake_case_ = 0
for b in range(UpperCamelCase_ ):
for i in buckets[b]:
snake_case_ = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
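# --- Editor's example: the LSD radix sort above on a classic input. Note the digit
# extraction (i / placement) % RADIX assumes non-negative integers. The function's
# public name is obfuscated here, so the expected result is shown via sorted().
check_data = [170, 45, 75, 90, 802, 24, 2, 66]
print(sorted(check_data))  # [2, 24, 45, 66, 75, 90, 170, 802] -- what the sort returns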
| 347 |
from collections import defaultdict
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase_ )
if ret % 2 == 0:
cuts.append(UpperCamelCase_ )
return ret
def _a ( ) -> Optional[Any]:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
a_, a_ = 10, 9
a_ = defaultdict(list)
a_ = {}
a_ = []
a_ = 0
a_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
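# --- Editor's example: a compact, self-contained recount of the answer printed
# above. Removing edge (u, v), where u is the child, is safe exactly when u's
# subtree has an even number of nodes, so the answer counts even-sized proper
# subtrees (2 for the sample tree above).
_editor_edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
_editor_adj = defaultdict(list)
for _u, _v in _editor_edges:
    _editor_adj[_u].append(_v)
    _editor_adj[_v].append(_u)

def _editor_subtree_size(node, parent):
    size = 1
    for nxt in _editor_adj[node]:
        if nxt != parent:
            size += _editor_subtree_size(nxt, node)
    return size

print(sum(_editor_subtree_size(u, v) % 2 == 0 for u, v in _editor_edges))  # 2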
| 340 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
a__ : List[str] = (DPMSolverSinglestepScheduler,)
a__ : Dict = (("""num_inference_steps""", 2_5),)
def UpperCamelCase__ ( self , **__lowercase) -> Tuple:
__UpperCamelCase :str = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**__UpperCAmelCase)
return config
def UpperCamelCase__ ( self , __lowercase=0 , **__lowercase) -> str:
__UpperCamelCase :int = dict(self.forward_default_kwargs)
__UpperCamelCase :Any = kwargs.pop('''num_inference_steps''' , __UpperCAmelCase)
__UpperCamelCase :int = self.dummy_sample
__UpperCamelCase :List[Any] = 0.1 * sample
__UpperCamelCase :Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase :Optional[Any] = self.get_scheduler_config(**__UpperCAmelCase)
__UpperCamelCase :Tuple = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
__UpperCamelCase :Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
__UpperCamelCase :int = scheduler_class.from_pretrained(__UpperCAmelCase)
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
__UpperCamelCase :Any = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase , __UpperCamelCase :Optional[Any] = sample, sample
for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1):
__UpperCamelCase :int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
__UpperCamelCase :Dict = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self) -> Tuple:
pass
def UpperCamelCase__ ( self , __lowercase=0 , **__lowercase) -> List[str]:
__UpperCamelCase :Tuple = dict(self.forward_default_kwargs)
__UpperCamelCase :str = kwargs.pop('''num_inference_steps''' , __UpperCAmelCase)
__UpperCamelCase :List[Any] = self.dummy_sample
__UpperCamelCase :List[Any] = 0.1 * sample
__UpperCamelCase :Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase :Union[str, Any] = self.get_scheduler_config()
__UpperCamelCase :List[str] = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals (must be after setting timesteps)
__UpperCamelCase :int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
__UpperCamelCase :int = scheduler_class.from_pretrained(__UpperCAmelCase)
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residual (must be after setting timesteps)
__UpperCamelCase :List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase :List[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
__UpperCamelCase :Optional[int] = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , __lowercase=None , **__lowercase) -> Any:
if scheduler is None:
__UpperCamelCase :Tuple = self.scheduler_classes[0]
__UpperCamelCase :Union[str, Any] = self.get_scheduler_config(**__UpperCAmelCase)
__UpperCamelCase :Tuple = scheduler_class(**__UpperCAmelCase)
__UpperCamelCase :int = self.scheduler_classes[0]
__UpperCamelCase :Union[str, Any] = self.get_scheduler_config(**__UpperCAmelCase)
__UpperCamelCase :List[str] = scheduler_class(**__UpperCAmelCase)
__UpperCamelCase :Tuple = 10
__UpperCamelCase :int = self.dummy_model()
__UpperCamelCase :Dict = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
__UpperCamelCase :Tuple = model(__UpperCAmelCase , __UpperCAmelCase)
__UpperCamelCase :Optional[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
return sample
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
__UpperCamelCase :Optional[int] = 50
__UpperCamelCase :List[str] = self.dummy_model()
__UpperCamelCase :Any = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase)
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:]):
__UpperCamelCase :Tuple = model(__UpperCAmelCase , __UpperCAmelCase)
__UpperCamelCase :Union[str, Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
__UpperCamelCase :Union[str, Any] = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.25_74) < 1E-3
def UpperCamelCase__ ( self) -> Tuple:
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :int = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
__UpperCamelCase :Optional[int] = self.full_loop(scheduler=__UpperCAmelCase)
__UpperCamelCase :Tuple = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.27_91) < 1E-3
__UpperCamelCase :List[Any] = DEISMultistepScheduler.from_config(scheduler.config)
__UpperCamelCase :Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config)
__UpperCamelCase :Any = UniPCMultistepScheduler.from_config(scheduler.config)
__UpperCamelCase :Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config)
__UpperCamelCase :Tuple = self.full_loop(scheduler=__UpperCAmelCase)
__UpperCamelCase :List[Any] = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.27_91) < 1E-3
def UpperCamelCase__ ( self) -> Any:
self.check_over_configs(thresholding=__UpperCAmelCase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , )
def UpperCamelCase__ ( self) -> int:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase)
def UpperCamelCase__ ( self) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , algorithm_type=__UpperCAmelCase , )
__UpperCamelCase :Any = self.full_loop(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , algorithm_type=__UpperCAmelCase , )
assert not torch.isnan(__UpperCAmelCase).any(), "Samples have nan numbers"
def UpperCamelCase__ ( self) -> List[str]:
self.check_over_configs(lower_order_final=__UpperCAmelCase)
self.check_over_configs(lower_order_final=__UpperCAmelCase)
def UpperCamelCase__ ( self) -> int:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def UpperCamelCase__ ( self) -> Any:
self.check_over_configs(variance_type=__UpperCAmelCase)
self.check_over_configs(variance_type='''learned_range''')
def UpperCamelCase__ ( self) -> List[str]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=0)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Dict = self.full_loop()
__UpperCamelCase :List[Any] = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.27_91) < 1E-3
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[int] = self.full_loop(use_karras_sigmas=__UpperCAmelCase)
__UpperCamelCase :str = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.22_48) < 1E-3
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :str = self.full_loop(prediction_type='''v_prediction''')
__UpperCamelCase :Tuple = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.14_53) < 1E-3
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Tuple = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCAmelCase)
__UpperCamelCase :Tuple = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.06_49) < 1E-3
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :int = self.scheduler_classes[0]
__UpperCamelCase :Union[str, Any] = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0)
__UpperCamelCase :List[Any] = scheduler_class(**__UpperCAmelCase)
__UpperCamelCase :List[Any] = 10
__UpperCamelCase :List[Any] = self.dummy_model()
__UpperCamelCase :Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
__UpperCamelCase :Any = model(__UpperCAmelCase , __UpperCAmelCase)
__UpperCamelCase :List[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
assert sample.dtype == torch.floataa
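# --- Editor's note: a hedged sketch of how the scheduler under test is typically
# swapped into a diffusers pipeline. The checkpoint name is illustrative and
# running this downloads weights, so it is left commented out.
#
# from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
#
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
# image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]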
| 43 |
import requests
from bsa import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCAmelCase = 1_6
_UpperCAmelCase = 3_2
def __magic_name__ ( lowercase , lowercase = 16 ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Any =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_: Any =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_: str =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_: Union[str, Any] =16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_: int =8
else:
SCREAMING_SNAKE_CASE_: str =None
return tokenizer.pad(
UpperCamelCase_ , padding="""longest""" , max_length=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
SCREAMING_SNAKE_CASE_: Any =DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_UpperCAmelCase = mocked_dataloaders # noqa: F811
def __magic_name__ ( lowercase , lowercase ):
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase_ ) == "1":
SCREAMING_SNAKE_CASE_: Any =2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
SCREAMING_SNAKE_CASE_: Optional[int] =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: str =config["""lr"""]
SCREAMING_SNAKE_CASE_: Tuple =int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE_: Union[str, Any] =int(config["""seed"""] )
SCREAMING_SNAKE_CASE_: Tuple =int(config["""batch_size"""] )
set_seed(UpperCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =get_dataloaders(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE_: Dict =evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE_: List[str] =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE_: str =batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE_: Union[str, Any] =MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_: Tuple =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_: Dict =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AdamW(params=model.parameters() , lr=UpperCamelCase_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_linear_schedule_with_warmup(
optimizer=UpperCamelCase_ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
SCREAMING_SNAKE_CASE_: Dict =os.path.split(UpperCamelCase_ )[-1].split(""".""" )[0]
accelerator.init_trackers(UpperCamelCase_ , UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
SCREAMING_SNAKE_CASE_: Union[str, Any] =0
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_: List[str] =model(**UpperCamelCase_ )
SCREAMING_SNAKE_CASE_: Tuple =outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
SCREAMING_SNAKE_CASE_: Optional[Any] =loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] =model(**UpperCamelCase_ )
SCREAMING_SNAKE_CASE_: List[Any] =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=UpperCamelCase_ , references=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE_: List[Any] =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , UpperCamelCase_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(UpperCamelCase_ ),
"""epoch""": epoch,
} , step=UpperCamelCase_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Any =argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=UpperCamelCase_ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args()
SCREAMING_SNAKE_CASE_: Tuple ={"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
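# --- Editor's note: typical invocations of this script (the filename
# tracking_example.py is assumed); `accelerate launch` handles device placement
# across single/multi GPU and TPU setups.
#
#   accelerate config                                        # one-time interactive setup
#   accelerate launch tracking_example.py --with_tracking --project_dir logs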
| 173 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
a_ = (3, 9, -11, 0, 7, 5, 1, -1)
a_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowercase__ :
a_ =42
a_ =42
class lowercase__ :
def __init__( self , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = None
for i in sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ):
lowerCAmelCase__ = Node(__UpperCAmelCase , self.head )
def __iter__( self )-> Iterator[int]:
'''simple docstring'''
lowerCAmelCase__ = self.head
while node:
yield node.data
lowerCAmelCase__ = node.next_node
def __len__( self )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self )-> str:
'''simple docstring'''
return " -> ".join([str(__UpperCAmelCase ) for node in self] )
def _a ( UpperCamelCase_ : SortedLinkedList , UpperCamelCase_ : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(UpperCamelCase_ ) + list(UpperCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
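# --- Editor's note: expected output of the merge above -- both tuples combined and
# iterated in ascending order (the constructor sorts descending and pushes each node
# onto the head, so the head ends up holding the smallest value):
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10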
| 340 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {'vocab_file': 'vocab.txt'}
UpperCAmelCase : List[Any] = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
UpperCAmelCase : Tuple = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
UpperCAmelCase : Any = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_INIT_CONFIGURATION
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ConvBertTokenizer
def __init__( self : List[Any] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Tuple=None , UpperCamelCase : str=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : str="[SEP]" , UpperCamelCase : Optional[int]="[PAD]" , UpperCamelCase : Any="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Tuple=None , **UpperCamelCase : int , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars
):
__UpperCAmelCase : Union[str, Any] = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) )
__UpperCAmelCase : Optional[Any] = do_lower_case
__UpperCAmelCase : Tuple = strip_accents
__UpperCAmelCase : Dict = tokenize_chinese_chars
__UpperCAmelCase : Tuple = normalizer_class(**__UpperCAmelCase )
__UpperCAmelCase : Dict = do_lower_case
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Any=None ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : List[Any] = None ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] = None ):
'''simple docstring'''
__UpperCAmelCase : Any = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
| 115 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
a_ = '''hopper-medium-v2'''
a_ = gym.make(env_name)
a_ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
a_ = env.reset()
a_ = 0
a_ = 0
a_ = 1000
a_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ = pipeline(obs, planning_horizon=32)
# execute action in environment
a_, a_, a_, a_ = env.step(denorm_actions)
a_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 279 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = '''src/transformers'''
a_ = '''docs/source/en/tasks'''
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
# Find the start prompt.
lowerCAmelCase__ = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase__ = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() )
lowerCAmelCase__ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 0 |
'''simple docstring'''
from __future__ import annotations
import bisect
def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ):
'''simple docstring'''
if hi < 0:
lowerCAmelCase_ : Optional[Any] = len(UpperCamelCase_ )
while lo < hi:
lowerCAmelCase_ : int = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowerCAmelCase_ : Dict = mid + 1
else:
lowerCAmelCase_ : Optional[Any] = mid
return lo
def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ):
'''simple docstring'''
if hi < 0:
lowerCAmelCase_ : str = len(UpperCamelCase_ )
while lo < hi:
lowerCAmelCase_ : Any = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowerCAmelCase_ : Union[str, Any] = mid + 1
else:
lowerCAmelCase_ : Any = mid
return lo
def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_left(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_right(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
def UpperCamelCase_ ( A__ : list[int] , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : int = len(UpperCamelCase_ ) - 1
while left <= right:
lowerCAmelCase_ : Union[str, Any] = left + (right - left) // 2
lowerCAmelCase_ : List[str] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowerCAmelCase_ : List[str] = midpoint - 1
else:
lowerCAmelCase_ : Optional[int] = midpoint + 1
return None
def UpperCamelCase_ ( A__ : list[int] , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = bisect.bisect_left(UpperCamelCase_ , UpperCamelCase_ )
if index != len(UpperCamelCase_ ) and sorted_collection[index] == item:
return index
return None
def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if right < left:
return None
lowerCAmelCase_ : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , midpoint - 1 )
else:
return binary_search_by_recursion(UpperCamelCase_ , UpperCamelCase_ , midpoint + 1 , UpperCamelCase_ )
if __name__ == "__main__":
__A : Tuple = input("Enter numbers separated by comma:\n").strip()
__A : Dict = sorted(int(item) for item in user_input.split(","))
__A : int = int(input("Enter a single number to be found in the list:\n"))
__A : Optional[Any] = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
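# --- Editor's example: bisect_left vs bisect_right on duplicates, matching the
# stdlib semantics that the hand-rolled versions above reproduce.
import bisect

check_xs = [1, 2, 2, 2, 3]
check_lo = bisect.bisect_left(check_xs, 2)   # 1: first position where 2 could go
check_hi = bisect.bisect_right(check_xs, 2)  # 4: position just past the last 2
print(check_lo, check_hi, check_hi - check_lo)  # 1 4 3 -> three occurrences of 2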
| 120 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
lowerCAmelCase__ = TOKENIZER_CLASSES
else:
lowerCAmelCase__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + "Fast" )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
lowerCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name]
lowerCAmelCase__ = True
if checkpoint_name is None:
lowerCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCAmelCase__ = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
lowerCAmelCase__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
elif add_prefix:
lowerCAmelCase__ = checkpoint
lowerCAmelCase__ = dump_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCAmelCase__ = file_path.split(UpperCamelCase_ )[-1][0]
if next_char == "/":
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
lowerCAmelCase__ = tokenizer.save_pretrained(
UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(UpperCamelCase_ )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
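# --- Editor's note: example invocations (the filename convert_slow_tokenizers.py is
# assumed; the flag names come from the argparse definitions above):
#
#   python convert_slow_tokenizers.py --dump_path ./fast_tokenizers
#   python convert_slow_tokenizers.py --dump_path ./out \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased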
| 340 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case_ = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : Any = 42
A_ : int = 42
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Tuple , a__ : Tuple ):
"""simple docstring"""
__snake_case = None
for i in sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ):
__snake_case = Node(__UpperCAmelCase , self.head )
def __iter__(self : Any ):
"""simple docstring"""
__snake_case = self.head
while node:
yield node.data
__snake_case = node.next_node
def __len__(self : Optional[Any] ):
"""simple docstring"""
return sum(1 for _ in self )
def __str__(self : int ):
"""simple docstring"""
return " -> ".join([str(__UpperCAmelCase ) for node in self] )
def lowerCamelCase__ ( snake_case_ : SortedLinkedList , snake_case_ : SortedLinkedList ) -> SortedLinkedList:
return SortedLinkedList(list(UpperCamelCase_ ) + list(UpperCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
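# Minimal usage sketch (values are illustrative; relies only on the classes above):
#
#   >>> merged = merge_lists(SortedLinkedList([5, 2]), SortedLinkedList([9, 1]))
#   >>> str(merged)
#   '1 -> 2 -> 5 -> 9'
#   >>> len(merged)
#   4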
| 24 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the token lengths of each train/val example so dynamic batching can use them."""
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tokenizer.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
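# Example invocation via python-fire (a sketch; "t5-small" and ./cnn_dm are placeholders
# for whatever tokenizer and data directory `utils.Seq2SeqDataset` expects):
#
#   python save_len_file.py t5-small ./cnn_dm --consider_target True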
| 340 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
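# Example invocation (a sketch, assuming the script is saved as run_onnx_exporter.py;
# only "facebook/bart-base" is wired into model_dict above, the other values are
# illustrative):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --device cpu --output_file_path BART.onnx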
| 263 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 340 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def rename_key(dct, old_key, new_key):
    val = dct.pop(old_key)
    dct[new_key] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
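# Example invocation (a sketch, assuming the script is saved as
# convert_swiftformer_original_to_hf.py; the checkpoint path must point at an original
# SwiftFormer checkpoint matching the chosen variant):
#
#   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth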
| 157 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old_key, new_key):
    val = dct.pop(old_key)
    dct[new_key] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
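# Example invocation (a sketch; the default checkpoint URL above is the official MSN
# ViT-S/16 release, the output folder name is an assumed example):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small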
| 340 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
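# Usage sketch (illustrative values; shows how the config and its ONNX companion fit
# together — MobileNetV1OnnxConfig takes the model config as its first argument):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   assert "pixel_values" in onnx_config.inputs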
| 68 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
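# Usage sketch (the model id and labels are illustrative; any CLIP-style checkpoint
# registered for zero-shot image classification should work):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "dog"],
#   )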
| 340 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
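# These tests are gated behind the TEST_SAGEMAKER environment variable (see the skipif
# above); a sketch of how they are typically launched (the test path is an assumption
# based on the transformers repository layout):
#
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker -k "model_parallel"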
| 94 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 340 | 0 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Read an audio byte payload through ffmpeg, returning a float32 mono waveform."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "f32le" , ) -> Any:
snake_case_ = f"""{sampling_rate}"""
snake_case_ = """1"""
if format_for_conversion == "s16le":
snake_case_ = 2
elif format_for_conversion == "f32le":
snake_case_ = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
snake_case_ = platform.system()
if system == "Linux":
snake_case_ = """alsa"""
snake_case_ = """default"""
elif system == "Darwin":
snake_case_ = """avfoundation"""
snake_case_ = """:0"""
elif system == "Windows":
snake_case_ = """dshow"""
snake_case_ = """default"""
snake_case_ = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
snake_case_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
snake_case_ = _ffmpeg_stream(UpperCamelCase_ , UpperCamelCase_ )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[float] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks as dicts with `raw`, `stride` and `sampling_rate`."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-length pieces with left/right strides."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Stream `buflen`-sized byte chunks from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 347 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
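# Usage sketch ("distilbert-base-uncased" is one of the checkpoints mapped above;
# the expected ids assume the standard uncased BERT vocabulary):
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tokenizer("Hello world")["input_ids"]  # -> [101, 7592, 2088, 102]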
| 340 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase_ :
'''simple docstring'''
a__ : Optional[int] = XGLMConfig
a__ : Union[str, Any] = {}
a__ : Optional[int] = """gelu"""
def __init__( self , __lowercase , __lowercase=14 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=0.02 , ) -> Union[str, Any]:
__UpperCamelCase :Dict = parent
__UpperCamelCase :List[Any] = batch_size
__UpperCamelCase :Tuple = seq_length
__UpperCamelCase :Any = is_training
__UpperCamelCase :Optional[int] = use_input_mask
__UpperCamelCase :Dict = use_labels
__UpperCamelCase :Optional[int] = vocab_size
__UpperCamelCase :Any = d_model
__UpperCamelCase :Optional[Any] = num_hidden_layers
__UpperCamelCase :Any = num_attention_heads
__UpperCamelCase :Optional[Any] = ffn_dim
__UpperCamelCase :Dict = activation_function
__UpperCamelCase :Union[str, Any] = activation_dropout
__UpperCamelCase :Union[str, Any] = attention_dropout
__UpperCamelCase :Any = max_position_embeddings
__UpperCamelCase :Tuple = initializer_range
__UpperCamelCase :int = None
__UpperCamelCase :int = 0
__UpperCamelCase :Optional[Any] = 2
__UpperCamelCase :List[Any] = 1
def UpperCamelCase__ ( self) -> Tuple:
return XGLMConfig.from_pretrained('''facebook/xglm-564M''')
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Dict = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3)
__UpperCamelCase :Optional[Any] = None
if self.use_input_mask:
__UpperCamelCase :Dict = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Union[str, Any] = self.get_config()
__UpperCamelCase :List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCamelCase__ ( self) -> List[str]:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[int] = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) :Optional[int] = config_and_inputs
__UpperCamelCase :int = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
a__ : Optional[int] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
a__ : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
a__ : Dict = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
a__ : List[str] = False
a__ : Optional[Any] = False
a__ : Dict = False
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Union[str, Any] = TFXGLMModelTester(self)
__UpperCamelCase :Union[str, Any] = ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37)
def UpperCamelCase__ ( self) -> Optional[int]:
self.config_tester.run_common_tests()
@slow
def UpperCamelCase__ ( self) -> str:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :Optional[Any] = TFXGLMModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''')
def UpperCamelCase__ ( self) -> Optional[Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self , __lowercase=True) -> Any:
__UpperCamelCase :str = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
__UpperCamelCase :str = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase :str = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
__UpperCamelCase :Optional[int] = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1)
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase)
@slow
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Any = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
__UpperCamelCase :Optional[Any] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
tf.random.set_seed(0)
__UpperCamelCase :str = tokenizer('''Today is a nice day and''' , return_tensors='''tf''')
__UpperCamelCase :List[str] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0'''):
__UpperCamelCase :List[Any] = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0])
__UpperCamelCase :Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase)
__UpperCamelCase :Dict = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
@slow
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
__UpperCamelCase :int = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
__UpperCamelCase :Dict = '''left'''
# use different length sentences to test batching
__UpperCamelCase :Union[str, Any] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
__UpperCamelCase :Union[str, Any] = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase)
__UpperCamelCase :Dict = inputs['''input_ids''']
__UpperCamelCase :List[Any] = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12)
__UpperCamelCase :Optional[Any] = tokenizer(sentences[0] , return_tensors='''tf''').input_ids
__UpperCamelCase :Optional[Any] = model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12)
__UpperCamelCase :str = tokenizer(sentences[1] , return_tensors='''tf''').input_ids
__UpperCamelCase :List[Any] = model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12)
__UpperCamelCase :Any = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
__UpperCamelCase :List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase)
__UpperCamelCase :Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase)
__UpperCamelCase :Union[str, Any] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence])
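# The integration tests above are marked @slow; a sketch of running just this module
# (the test file path is an assumption based on the transformers test layout):
#
#   RUN_SLOW=1 python -m pytest tests/models/xglm/test_modeling_tf_xglm.py -k "generate"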
| 43 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
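# Usage sketch (illustrative; `find_executable_batch_size` retries the decorated
# function with a halved batch size whenever it hits a CUDA out-of-memory error —
# the training body below is a placeholder):
#
#   accelerator = Accelerator()
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the training loop
#
#   train()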
| 340 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_UpperCAmelCase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
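
# Hedged usage sketch: a concrete model suite would mix the tester in roughly like
# this (the GPT-2 names are illustrative, not asserted by this file):
#
#     class FlaxGPT2GenerationTest(FlaxGenerationTesterMixin, unittest.TestCase):
#         all_generative_model_classes = (FlaxGPT2LMHeadModel,)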
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the transformers repo and raise an error if at least one is inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Check that every submodule is registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
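
# Hedged usage note: in the Transformers repository this consistency checker is
# typically run from the repo root (e.g. `python utils/check_inits.py` — the exact
# path is an assumption) and raises a ValueError listing every __init__.py whose
# two halves disagree.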
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> str:
'''simple docstring'''
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
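
def _demo_multiplication_table() -> None:
    # Hedged self-check of the helper above; 3 and 2 are arbitrary example values.
    assert multiplication_table(number=3, number_of_terms=2) == "3 * 1 = 3\n3 * 2 = 6"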
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """A class representing an undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving achievable by removing redundant edges while keeping the network connected."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Return the number of values of L <= limit such that a wire of length L can be
    bent into exactly one integer-sided right triangle (Project Euler style).
    """
    # frequencies of the perimeters of right triangles with integral sides
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
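
# Hedged usage note: with the lazy module installed in sys.modules, importing the
# config class stays cheap; the heavy torch/TF/flax branches above are only
# imported once a model class from the matching backend is first accessed.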
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
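
# Hedged usage sketch (the checkpoint name and the PIL `image` are illustrative,
# not referenced by this file):
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(image, return_tensors="pt")  # OCR + boxes + tokenization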
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list with a single random PIL image of shape (30, 400, 3)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
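
def _demo_get_pairs() -> None:
    # Hedged sketch: the symbol tuple for "low" yields its adjacent-symbol pairs,
    # which is the candidate set the BPE loop in `bpe` ranks and merges.
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}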
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c, calculate the roots of the
    quadratic equation ax^2 + bx + c = 0. Complex roots are returned as complex
    numbers; real roots are returned as plain floats.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
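
def _demo_quadratic_roots() -> None:
    # Worked example: x^2 - 3x + 2 = 0 factors as (x - 1)(x - 2); with these
    # coefficients the "+" root (2.0) is returned first.
    assert quadratic_roots(a=1, b=-3, c=2) == (2.0, 1.0)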
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation with a DanceDiffusion UNet and scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
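
# Hedged usage sketch (the checkpoint name is an illustrative dance-diffusion
# checkpoint, not referenced by this file):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audio = pipe(audio_length_in_s=4.0).audios[0]  # numpy array (channels, samples)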
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits : int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if not isinstance(number_of_qubits , int ):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
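    # Sanity check (illustrative): the QFT of |000> is a uniform superposition over
    # all 2**n basis states, so each outcome should appear in roughly 1/8 of shots.
    counts = quantum_fourier_transform(3)
    assert sum(counts.values()) == 10_000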
| 340 | 0 |
def alternative_string_arrange( first_str: str , second_str: str ) -> str:
    '''simple docstring'''
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
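    # Further worked examples: characters interleave until the shorter string is
    # exhausted, then the rest of the longer string is appended verbatim.
    print(alternative_string_arrange("""ABCDE""", """XY"""))  # -> AXBYCDE
    print(alternative_string_arrange("""ABC""", """"""))      # -> ABC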
| 68 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    CHARACTER ="""char"""
    BPE ="""bpe"""
    WORDPIECE ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    attributes =["image_processor", "char_tokenizer"]
    image_processor_class ="""ViTImageProcessor"""
    tokenizer_class ="""MgpstrTokenizer"""
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
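# Hedged end-to-end sketch; the checkpoint name below is the public MGP-STR release
# and is assumed here, not taken from this file:
# from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# text = processor.batch_decode(model(pixel_values).logits)["generated_text"]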
| 340 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=16 , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2", "stage3"] , _lowerCamelCase=[1, 2, 3] , ):
a :Union[str, Any] = parent
a :Tuple = batch_size
a :int = image_size
a :str = patch_size
a :int = num_channels
a :Optional[int] = embed_dim
a :Optional[Any] = depths
a :Any = num_heads
a :Union[str, Any] = window_size
a :Any = mlp_ratio
a :Tuple = qkv_bias
a :Tuple = hidden_dropout_prob
a :Any = attention_probs_dropout_prob
a :List[Any] = drop_path_rate
a :str = hidden_act
a :List[Any] = use_absolute_embeddings
a :Tuple = patch_norm
a :List[str] = layer_norm_eps
a :str = initializer_range
a :Dict = is_training
a :List[str] = scope
a :str = use_labels
a :Union[str, Any] = type_sequence_label_size
a :Any = encoder_stride
a :Dict = out_features
a :Any = out_indices
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a :str = None
if self.use_labels:
a :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a :Any = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Any = MaskFormerSwinModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a :List[Any] = model(__UpperCAmelCase )
a :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
a :int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :List[Any] = MaskFormerSwinBackbone(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a :List[Any] = model(__UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(__UpperCAmelCase ):
a :List[Any] = ['''stem''']
a :Any = MaskFormerSwinBackbone(config=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = self.prepare_config_and_inputs()
a , a , a :List[str] = config_and_inputs
a :Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = MaskFormerSwinModelTester(self )
a :int = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self ):
return
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCAmelCase )
@unittest.skip('''Swin does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
a , a :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a :Optional[Any] = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a :Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a , a :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a :int = model_class(__UpperCAmelCase )
a :int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a :Tuple = [*signature.parameters.keys()]
a :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :List[str] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
a :Any = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
a :List[Any] = outputs.hidden_states
a :Union[str, Any] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# Swin has a different seq_length
a :Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a :str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self ):
a , a :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
a :int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
a :Optional[int] = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a :List[Any] = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a , a :str = self.model_tester.prepare_config_and_inputs_for_common()
a :Optional[int] = 3
a :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
a :List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
a :Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a :List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
a :Tuple = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a :Any = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
a , a :Dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowerCamelCase ):
a :int = 0
return t
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
with torch.no_grad():
a :Tuple = model(**__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase )
a :Optional[Any] = model(**__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(__UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase ):
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__UpperCAmelCase ) , set_nan_tensor_to_zero(__UpperCAmelCase ) , atol=1e-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(__UpperCAmelCase ).any()} and `inf`: {torch.isinf(__UpperCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(__UpperCAmelCase ).any()} and `inf`: {torch.isinf(__UpperCAmelCase )}.'''
) , )
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
for model_class in self.all_model_classes:
a :Optional[int] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
a :str = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
a :Union[str, Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a :int = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
a :List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a :Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
a :Any = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'''output_hidden_states''': True} )
a :Tuple = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
a :Dict = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'''output_hidden_states''': True} )
@require_torch
class _snake_case ( unittest.TestCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = MaskFormerSwinConfig
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = MaskFormerSwinModelTester(self )
def SCREAMING_SNAKE_CASE__ ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
self.assertIsNotNone(outputs.attentions )
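# Standalone illustration of the tuple/dict equivalence idea exercised above:
# a model called with return_dict=False must yield the same values, in the same
# order, as ModelOutput.to_tuple() — e.g. (1, 2, None) should line up with
# {"logits": 1, "hidden_states": 2, "attentions": None}.values().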
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
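# Minimal sketch of the lazy-import idea behind `_LazyModule` above (not the real
# implementation, which lives in transformers.utils): heavy submodules are only
# imported on first attribute access.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # resolve and import the owning submodule lazily, then fetch the attribute
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)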
| 340 | 0 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
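# Canonical entry-point usage of the API re-exported above (sketch; model, optimizer
# and dataloader construction elided):
# from accelerate import Accelerator
# accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch in dataloader:
#     loss = model(**batch).loss
#     accelerator.backward(loss)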
| 347 |
from collections import defaultdict
def dfs( start : int ) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree( ) -> None:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
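    # For the sample tree above (the classic "Even Tree" puzzle) the answer is 2:
    # cutting the edges into the subtrees rooted at 3 (size 2) and 6 (size 4)
    # leaves only even-sized components.
    assert len(cuts) - 1 == 2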
| 340 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance( vector_a , vector_b ):
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def euclidean_distance_no_np( vector_a , vector_b ):
    '''simple docstring'''
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark( ):
'''simple docstring'''
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
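    # Both implementations agree on the inputs used in the benchmark:
    # sqrt((4-1)**2 + (5-2)**2 + (6-3)**2) = sqrt(27) ~= 5.196
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - euclidean_distance_no_np([1, 2, 3], [4, 5, 6])) < 1e-9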
| 43 |
import requests
from bs4 import BeautifulSoup
def stock_price( symbol : str = "AAPL" ) -> str:
    """simple docstring"""
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    lock_a = FileLock(str(tmpdir / """foo.lock""" ) )
    lock_b = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock_b.acquire(timeout )
    assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    filename = """a""" * 1000 + """.lock"""
    lock_a = FileLock(str(tmpdir / filename ) )
    assert lock_a._lock_file.endswith(""".lock""" )
    assert not lock_a._lock_file.endswith(filename )
    assert len(os.path.basename(lock_a._lock_file ) ) <= 255
    lock_b = FileLock(tmpdir / filename )
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            lock_b.acquire(0 )
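# Basic usage pattern exercised by the tests above (sketch):
# lock = FileLock("resource.lock")
# with lock.acquire(timeout=1):
#     ...  # critical section; a competing holder makes acquire() raise Timeout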
| 173 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node :
    data: int
    next_node: Node | None
class SortedLinkedList :
    def __init__( self , values )-> None:
        '''simple docstring'''
        self.head = None
        for i in sorted(values , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self )-> Iterator[int]:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self )-> int:
        '''simple docstring'''
        return sum(1 for _ in self )
    def __str__( self )-> str:
        '''simple docstring'''
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one : SortedLinkedList , sll_two : SortedLinkedList ) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
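    # Expected merged output for the sample data above (worked out by hand):
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10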
| 340 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = BlipImageProcessor()
        __UpperCAmelCase : Any = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__UpperCAmelCase : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
__UpperCAmelCase : List[Any] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : str ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def lowerCamelCase__ ( self : str , **UpperCamelCase : Dict ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
        __UpperCAmelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__UpperCAmelCase : List[str] = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCAmelCase : Any = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
__UpperCAmelCase : Any = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = self.get_qformer_tokenizer()
__UpperCAmelCase : Dict = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
__UpperCAmelCase : Any = self.prepare_image_inputs()
__UpperCAmelCase : List[str] = image_processor(__UpperCAmelCase , return_tensors="""np""" )
__UpperCAmelCase : Optional[Any] = processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_qformer_tokenizer()
__UpperCAmelCase : Any = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = """lower newer"""
__UpperCAmelCase : Optional[int] = processor(text=__UpperCAmelCase )
__UpperCAmelCase : Any = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : Tuple = self.get_qformer_tokenizer()
__UpperCAmelCase : Tuple = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
__UpperCAmelCase : str = """lower newer"""
__UpperCAmelCase : Tuple = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : str = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : int = self.get_qformer_tokenizer()
__UpperCAmelCase : Union[str, Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
__UpperCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : List[str] = processor.batch_decode(__UpperCAmelCase )
__UpperCAmelCase : int = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = self.get_image_processor()
__UpperCAmelCase : Optional[int] = self.get_tokenizer()
__UpperCAmelCase : Union[str, Any] = self.get_qformer_tokenizer()
__UpperCAmelCase : List[Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
__UpperCAmelCase : str = """lower newer"""
__UpperCAmelCase : Tuple = self.prepare_image_inputs()
__UpperCAmelCase : Optional[int] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 115 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
a_ = '''hopper-medium-v2'''
a_ = gym.make(env_name)
a_ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
a_ = env.reset()
a_ = 0
a_ = 0
a_ = 1000
a_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ = pipeline(obs, planning_horizon=32)
# execute action in environment
a_, a_, a_, a_ = env.step(denorm_actions)
a_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 279 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file( filename , start_prompt , end_prompt ):
    """simple docstring"""
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task( task_guide ):
    """simple docstring"""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    """simple docstring"""
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
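    # Example invocations (run from the repo root, as the header comment says):
    #   python utils/check_task_guides.py                      # check only
    #   python utils/check_task_guides.py --fix_and_overwrite  # rewrite stale lists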
| 340 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__A : Dict = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__A : Optional[int] = logging.getLogger()
def get_setup_file( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
def get_results( output_dir , split="eval" ):
    '''simple docstring'''
    path = os.path.join(output_dir , f'{split}_results.json' )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            return json.load(f )
    raise ValueError(f'can\'t find {path}' )
__A : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __snake_case ( _UpperCAmelCase):
"""simple docstring"""
def __lowercase ( self : Any ) -> Any:
lowerCAmelCase_ : Tuple = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : str = F'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
run_flax_glue.main()
lowerCAmelCase_ : List[str] = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
def __lowercase ( self : Any ) -> Optional[int]:
lowerCAmelCase_ : Optional[int] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Tuple = F'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
run_clm_flax.main()
lowerCAmelCase_ : List[Any] = get_results(__UpperCAmelCase )
self.assertLess(result["""eval_perplexity"""] , 1_00 )
@slow
def __lowercase ( self : Union[str, Any] ) -> str:
lowerCAmelCase_ : int = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Tuple = F'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
run_summarization_flax.main()
lowerCAmelCase_ : Any = get_results(__UpperCAmelCase , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase_ : Any = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : str = F'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
run_mlm_flax.main()
lowerCAmelCase_ : List[Any] = get_results(__UpperCAmelCase )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def __lowercase ( self : Tuple ) -> Dict:
lowerCAmelCase_ : List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Union[str, Any] = F'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
            run_t5_mlm_flax.main()
lowerCAmelCase_ : int = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
def __lowercase ( self : Dict ) -> List[str]:
lowerCAmelCase_ : List[str] = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase_ : List[str] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Optional[Any] = F'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
run_flax_ner.main()
lowerCAmelCase_ : Optional[Any] = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def __lowercase ( self : Any ) -> List[Any]:
lowerCAmelCase_ : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : List[str] = F'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
run_qa.main()
lowerCAmelCase_ : int = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 120 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
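    # Example invocation (script file name assumed from its contents):
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast-tokenizers
    # Omitting --tokenizer_name converts every tokenizer listed in TOKENIZER_CLASSES.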
| 340 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
snake_case_ = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase ):
    def one_complete_example(self , complete_file_name , parser_only , secondary_filename=None , special_strings=None ):
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
        examples_path = os.path.abspath('''examples''' )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section='''main()''' if parser_only else '''training_function()''' , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , parser_only )
                        diff = '''\n'''.join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , '''''' )
                        self.assertEqual(diff , '''''' )
    def test_nlp_examples(self ):
        """simple docstring"""
        self.one_complete_example('''complete_nlp_example.py''' , True )
        self.one_complete_example('''complete_nlp_example.py''' , False )
    def test_cv_examples(self ):
        """simple docstring"""
        cv_path = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
        special_strings = [
            ''' ''' * 16 + '''{\n\n''',
            ''' ''' * 20 + '''\"accuracy\": eval_metric[\"accuracy\"],\n\n''',
            ''' ''' * 20 + '''\"f1\": eval_metric[\"f1\"],\n\n''',
            ''' ''' * 20 + '''\"train_loss\": total_loss.item() / len(train_dataloader),\n\n''',
            ''' ''' * 20 + '''\"epoch\": epoch,\n\n''',
            ''' ''' * 16 + '''},\n\n''',
            ''' ''' * 16 + '''step=epoch,\n''',
            ''' ''' * 12,
            ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
        ]
        self.one_complete_example('''complete_cv_example.py''' , True , cv_path , special_strings )
        self.one_complete_example('''complete_cv_example.py''' , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase ):
    clear_on_setup = False
    @classmethod
    def setUpClass(cls ):
        """simple docstring"""
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
    @classmethod
    def tearDownClass(cls ):
        """simple docstring"""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch(self ):
        """simple docstring"""
        testargs = f"""\n        examples/by_feature/checkpointing.py\n        --checkpointing_steps epoch\n        --output_dir {self.tmpdir}\n        """.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
    def test_checkpointing_by_steps(self ):
        """simple docstring"""
        testargs = f"""\n        examples/by_feature/checkpointing.py\n        --checkpointing_steps 1\n        --output_dir {self.tmpdir}\n        """.split()
        _ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
    def test_load_states_by_epoch(self ):
        """simple docstring"""
        testargs = f"""\n        examples/by_feature/checkpointing.py\n        --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n        """.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn('''epoch 0:''' , output )
        self.assertIn('''epoch 1:''' , output )
    def test_load_states_by_steps(self ):
        """simple docstring"""
        testargs = f"""\n        examples/by_feature/checkpointing.py\n        --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n        """.split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''' , output )
            self.assertIn('''epoch 1:''' , output )
        else:
            self.assertIn('''epoch 0:''' , output )
            self.assertIn('''epoch 1:''' , output )
    @slow
    def test_cross_validation(self ):
        """simple docstring"""
        testargs = '''\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        '''.split()
        with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall('''({.+})''' , output )
            results = [r for r in results if '''accuracy''' in r][-1]
            results = ast.literal_eval(results )
            self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
    def test_multi_process_metrics(self ):
        """simple docstring"""
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
        run_command(self._launch_args + testargs )
    @require_trackers
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_tracking(self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""\n            examples/by_feature/tracking.py\n            --with_tracking\n            --project_dir {tmpdir}\n            """.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , '''tracking''' ) ) )
    def test_gradient_accumulation(self ):
        """simple docstring"""
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
        run_command(self._launch_args + testargs )
    def test_local_sgd(self ):
        """simple docstring"""
        testargs = ['''examples/by_feature/local_sgd.py''']
        run_command(self._launch_args + testargs )
| 24 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name: str , data_dir: str , max_source_length: int = 1_024 , max_target_length: int = 1_024 , consider_target: bool = False , **kwargs
) -> None:
    """Precompute and pickle per-example token lengths for the train and val splits."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )


if __name__ == "__main__":
    fire.Fire(save_len_file)
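# Example CLI invocation via python-fire (paths and the tokenizer name are
# illustrative; `data_dir` must contain the train/val source and target files
# expected by SeqaSeqDataset):
#
#   python save_len_file.py --tokenizer_name facebook/bart-large-cnn \
#       --data_dir cnn_dm --max_source_length 1024 --max_target_length 56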
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _UpperCAmelCase :
'''simple docstring'''
a__ =LEDConfig
a__ ={}
a__ ='''gelu'''
def __init__( self , A , A=1_3 , A=7 , A=True , A=False , A=9_9 , A=3_2 , A=2 , A=4 , A=3_7 , A=0.1 , A=0.1 , A=2_0 , A=2 , A=1 , A=0 , A=4 , ) -> str:
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Tuple = seq_length
_UpperCAmelCase : List[str] = is_training
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : Dict = eos_token_id
_UpperCAmelCase : List[str] = pad_token_id
_UpperCAmelCase : str = bos_token_id
_UpperCAmelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCAmelCase : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCAmelCase : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCAmelCase : Any = prepare_led_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_UpperCAmelCase : List[Any] = tf.concat(
[tf.zeros_like(__UpperCAmelCase )[:, :-1], tf.ones_like(__UpperCAmelCase )[:, -1:]] , axis=-1 , )
_UpperCAmelCase : Any = global_attention_mask
return config, inputs_dict
def __lowerCAmelCase ( self , A , A ) -> int:
_UpperCAmelCase : Optional[int] = TFLEDModel(config=__UpperCAmelCase ).get_decoder()
_UpperCAmelCase : int = inputs_dict['''input_ids''']
_UpperCAmelCase : Dict = input_ids[:1, :]
_UpperCAmelCase : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
_UpperCAmelCase : Union[str, Any] = 1
# first forward pass
_UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCAmelCase : Any = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
_UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
def prepare_led_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class _UpperCAmelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
a__ =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
a__ =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
a__ =(
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
a__ =True
a__ =False
a__ =False
a__ =False
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Union[str, Any] = TFLEDModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=__UpperCAmelCase )
def __lowerCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[str] = tf.zeros_like(inputs_dict['''attention_mask'''] )
_UpperCAmelCase : Optional[Any] = 2
_UpperCAmelCase : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : List[str] = self.model_tester.seq_length
_UpperCAmelCase : List[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(A ):
_UpperCAmelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(A ):
_UpperCAmelCase : str = [t.numpy() for t in outputs.encoder_attentions]
_UpperCAmelCase : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Any = False
_UpperCAmelCase : Dict = model_class(__UpperCAmelCase )
_UpperCAmelCase : int = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_UpperCAmelCase : Any = len(__UpperCAmelCase )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
if self.is_encoder_decoder:
_UpperCAmelCase : Any = model_class(__UpperCAmelCase )
_UpperCAmelCase : Any = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_decoder_attentions_output(__UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Any = model_class(__UpperCAmelCase )
_UpperCAmelCase : str = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
# Check attention is always last and order is fine
_UpperCAmelCase : int = True
_UpperCAmelCase : Dict = True
_UpperCAmelCase : str = model_class(__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )


TOLERANCE = 1E-4
@slow
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
_UpperCAmelCase : Any = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_UpperCAmelCase : Union[str, Any] = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_UpperCAmelCase : List[str] = prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
_UpperCAmelCase : Tuple = model(**__UpperCAmelCase )[0]
_UpperCAmelCase : List[Any] = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
_UpperCAmelCase : List[str] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Tuple = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
_UpperCAmelCase : List[Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_UpperCAmelCase : str = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_UpperCAmelCase : List[Any] = prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = model(**__UpperCAmelCase )[0]
_UpperCAmelCase : Optional[Any] = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
_UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-3 , rtol=1E-3 )
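# Minimal generation sketch with the public LED API (not part of the tests
# above; the checkpoint is the same one used by the integration tests):
#
#   from transformers import AutoTokenizer, TFLEDForConditionalGeneration
#
#   tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
#   model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
#   inputs = tokenizer("A very long document ...", return_tensors="tf")
#   summary_ids = model.generate(inputs.input_ids, max_length=64)
#   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))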
| 263 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig ):
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",  # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-1_2 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , )-> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 0 |
def solution(n: int = 100 ) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
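    # Quick sanity check of the closed forms: for n = 10 the sum of squares is
    # 385 and the square of the sum is 3025, so the difference must be 2640.
    assert solution(10) == 2640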
| 157 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ):
    """simple docstring"""
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
    else:
        expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
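# Example invocation (a sketch; the default checkpoint URL above points at the
# small ViT-MSN release, and the output directory is illustrative):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small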
| 340 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A__ = [1_4_4, 1_9_2, 2_4_0]
A__ = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0]
elif "mobilevit_xs" in mobilevit_name:
A__ = [9_6, 1_2_0, 1_4_4]
A__ = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4]
elif "mobilevit_xxs" in mobilevit_name:
A__ = [6_4, 8_0, 9_6]
A__ = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0]
A__ = 0.05
A__ = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
A__ = 5_1_2
A__ = 1_6
A__ = 2_1
A__ = "pascal-voc-id2label.json"
else:
A__ = 1_0_0_0
A__ = "imagenet-1k-id2label.json"
A__ = "huggingface/label-files"
A__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="dataset" ) , "r" ) )
A__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F'layer_{i}.' in name:
A__ = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' )
if "conv_1." in name:
A__ = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
A__ = name.replace(".block." , "." )
if "exp_1x1" in name:
A__ = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
A__ = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
A__ = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
A__ = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
A__ = name.replace(".norm." , ".normalization." )
if ".conv." in name:
A__ = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
A__ = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
A__ = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
A__ = name.replace(F'.{i}.{j}.' , F'.{i}.' )
if "expand_1x1" in name:
A__ = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
A__ = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
A__ = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F'.global_rep.{i}.weight' in name:
A__ = name.replace(F'.global_rep.{i}.weight' , ".layernorm.weight" )
if F'.global_rep.{i}.bias' in name:
A__ = name.replace(F'.global_rep.{i}.bias' , ".layernorm.bias" )
if ".global_rep." in name:
A__ = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
A__ = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
A__ = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
A__ = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
A__ = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
A__ = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
A__ = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
A__ = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
A__ = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
A__ = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
A__ = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
A__ = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
A__ = "mobilevit." + name
return name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: Dict=False ) -> Dict:
'''simple docstring'''
if base_model:
A__ = ""
else:
A__ = "mobilevit."
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(UpperCamelCase_ )
if key[:8] == "encoder.":
A__ = key[8:]
if "qkv" in key:
A__ = key.split("." )
A__ = int(key_split[0][6:] ) - 1
A__ = int(key_split[3] )
A__ = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
A__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A__ = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def lowerCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: str=False ) -> Tuple:
'''simple docstring'''
A__ = get_mobilevit_config(UpperCamelCase_ )
# load original state_dict
A__ = torch.load(UpperCamelCase_ , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
A__ = MobileViTForSemanticSegmentation(UpperCamelCase_ ).eval()
else:
A__ = MobileViTForImageClassification(UpperCamelCase_ ).eval()
A__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
A__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
A__ = image_processor(images=prepare_img() , return_tensors="pt" )
A__ = model(**UpperCamelCase_ )
A__ = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
A__ = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A__ = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A__ = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4 )
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
A__ = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
A__ = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
A__ = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
A__ = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
A__ = model_mapping[mobilevit_name]
image_processor.push_to_hub(UpperCamelCase_ , organization="apple" )
model.push_to_hub(UpperCamelCase_ , organization="apple" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',"""
""" \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
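# Example invocation (a sketch; the script filename is assumed, and the
# checkpoint path must point at an original Apple ml-cvnets state dict):
#
#   python convert_mlcvnets_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small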
| 68 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
    def __init__( self , **kwargs )-> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
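# Usage sketch through the high-level pipeline API (the model name is a public
# CLIP checkpoint; any zero-shot image-classification model works):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   # -> [{"score": ..., "label": "cat"}, ...] sorted by descending score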
| 340 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self ):
        return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''


class SegmentTree:
    def __init__(self , collection: Sequence , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )

    def update(self , i , val ):
        self._update_tree(self.root , i , val )

    def query_range(self , i , j ):
        return self._query_range(self.root , i , j )

    def _build_tree(self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )

    def _update_tree(self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )

    def _query_range(self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )

    def traverse(self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
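    # The tree accepts any associative binary function; a small extra sketch
    # (values chosen so the result is easy to verify by hand):
    #   import math
    #   gcd_tree = SegmentTree([12, 18, 24, 36], math.gcd)
    #   print(gcd_tree.query_range(0, 3))  # 6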
| 94 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')


class BartphoTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp( self )-> None:
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F"{token} {vocab_tokens[token]}\n" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 340 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) ->None:
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) ->int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) ->int:
        """simple docstring"""
        return self.d_model
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ) ->Any:
        """simple docstring"""
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict( self ) ->Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation( self ) ->float:
        """simple docstring"""
        return 1E-5
    @property
    def default_onnx_opset( self ) ->int:
        """simple docstring"""
        return 12
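# Usage sketch (standard transformers pattern; values are illustrative):
#
#   from transformers import DetrConfig, DetrForObjectDetection
#
#   config = DetrConfig(num_queries=50)   # timm ResNet-50 backbone by default
#   model = DetrForObjectDetection(config)
#   assert config.hidden_size == config.d_model == 256  # via the property above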
| 347 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
a_ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
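# Hedged usage sketch for the class above (requires network access to download the
# checkpoint; the checkpoint name comes from the pretrained maps defined in this file):
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoding = tokenizer("Hello world")
#   print(encoding["input_ids"])   # [CLS] ... [SEP] ids, per build_inputs_with_special_tokens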
| 340 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random (3, 30, 400) uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
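# Hedged note: in the transformers repo this test class lives in a file such as
# tests/models/owlvit/test_processor_owlvit.py (path assumed) and can be run with:
#   python -m pytest -k OwlViTProcessorTest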
| 43 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
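# Hedged example of the API re-exported above -- the canonical Accelerator training loop
# (model, optimizer and dataloader are placeholders, not defined here):
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()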
| 340 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
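# Hedged usage sketch (requires network access): XLNet appends <sep> <cls> at the END of
# the sequence and pads on the left, unlike BERT-style tokenizers.
#
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   ids = tokenizer("hello")["input_ids"]   # [..., sep_token_id, cls_token_id]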
| 173 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in `_import_structure`
    and the objects defined under `TYPE_CHECKING`. Returns None for a traditional init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
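# Illustrative behaviour on toy inputs:
#   analyze_results({"none": ["A", "B"]}, {"none": ["A"]})
# returns:
#   ["Differences for base imports:", "  B in _import_structure but not in TYPE_HINT."]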
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check all submodules are registered in the main init of Transformers."""
    # Load the module from the repo checkout (not an installed version).
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
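# In the transformers repo this script is utils/check_inits.py (path assumed) and is
# normally run from the repository root, where PATH_TO_TRANSFORMERS resolves:
#   python utils/check_inits.py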
| 340 | 0 |