import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())

# ---------------------------------------------------------------------------

def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG, found with a
    Kahn-style topological traversal."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
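# Sanity check: the longest path in the sample DAG above is 0 -> 2 -> 5 -> 6 -> 7,
# which touches 5 vertices, so this call prints 5.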

# ---------------------------------------------------------------------------

def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circularly shift a bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (reads the module-level `p4_table`)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)

# ---------------------------------------------------------------------------

import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]

# ---------------------------------------------------------------------------

import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

# ---------------------------------------------------------------------------

import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int32) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length

# ---------------------------------------------------------------------------

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
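# A minimal usage sketch (an assumption, not from the original file):
# PipelineTool instances are callable, running encode -> forward -> decode,
# and the checkpoint is downloaded on first use.
#
#   translator = TranslationTool()
#   print(translator("Bonjour, le monde!", src_lang="French", tgt_lang="English"))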

# ---------------------------------------------------------------------------

import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor

# ---------------------------------------------------------------------------

import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
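# Note that show_min() returns the distance rather than printing it; with the
# edges above, print(graph.show_min(1, 4)) gives 11 (path 1 -> 3 -> 4) and
# print(graph.show_min(0, 3)) gives 16 (path 0 -> 2 -> 3).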

# ---------------------------------------------------------------------------

import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
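# The FAISS pattern exercised above, in miniature (assumes `datasets` and
# `faiss` are installed; the vectors are arbitrary example data):
#
#   from datasets.search import FaissIndex
#   index = FaissIndex(string_factory="Flat")
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, ids = index.search(np.ones(5, dtype=np.float32))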

# ---------------------------------------------------------------------------

from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class snake_case__ :
def __init__( self , lowerCAmelCase__ = None ) -> None:
if components is None:
__magic_name__ : Any = []
__magic_name__ : List[str] = list(lowerCAmelCase__ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"
def __add__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : Dict = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : int = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> float:
...
def __mul__( self , lowerCAmelCase__ ) -> float | Vector:
if isinstance(lowerCAmelCase__ , (float, int) ):
__magic_name__ : Optional[Any] = [c * other for c in self.__components]
return Vector(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
__magic_name__ : Optional[Any] = len(self )
__magic_name__ : List[Any] = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return sum(lowerCAmelCase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __magic_name__ ( self ) -> Vector:
return Vector(self.__components )
def __magic_name__ ( self , lowerCAmelCase__ ) -> float:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__magic_name__ : Optional[int] = value
def __magic_name__ ( self ) -> float:
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__magic_name__ : Dict = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase__ ) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> float:
__magic_name__ : Optional[Any] = self * other
__magic_name__ : List[str] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCamelCase ( _A ):
"""simple docstring"""
assert isinstance(_A, _A )
return Vector([0] * dimension )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
assert isinstance(_A, _A ) and (isinstance(_A, _A ))
__magic_name__ : Union[str, Any] = [0] * dimension
__magic_name__ : Optional[int] = 1
return Vector(_A )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
assert (
isinstance(_A, _A )
and isinstance(_A, _A )
and (isinstance(_A, (int, float) ))
)
return x * scalar + y
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
random.seed(_A )
__magic_name__ : Union[str, Any] = [random.randint(_A, _A ) for _ in range(_A )]
return Vector(_A )
class Matrix:
    """A dense matrix with basic arithmetic, minors, cofactors and determinant."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return an n x n matrix of zeros."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix with random integer entries in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
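# A quick, hand-checkable demo of the helpers and Matrix class above (a
# minimal sketch; it assumes the Vector class defined earlier in this file
# provides component/change_component and a printable __str__):
if __name__ == "__main__":
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1 * 4 - 2 * 3 = -2
    print(m.minor(0, 0))  # determinant of [[4]] = 4
    print(m.cofactor(0, 1))  # (-1) ** (0 + 1) * minor(0, 1) = -3
    print(m * unit_basis_vector(2, 0))  # first column of m: (1, 3)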
| 342 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Optional[Any] = MgpstrTokenizer
lowercase__ : int = False
lowercase__ : Any = {}
lowercase__ : Optional[int] = False
def __magic_name__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
__magic_name__ : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__magic_name__ : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : List[str] = """tester"""
__magic_name__ : int = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Dict = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : List[str] = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
__magic_name__ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ ,__magic_name__ : Optional[Any] = self.get_input_output_texts(lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
__magic_name__ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
__magic_name__ : Optional[int] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase__ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __magic_name__ ( self ) -> Tuple:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def __magic_name__ ( self ) -> Optional[Any]:
pass
| 342 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__magic_name__: str = logging.get_logger(__name__)
__magic_name__: int = "▁"
__magic_name__: List[str] = {"vocab_file": "sentencepiece.bpe.model"}
__magic_name__: List[str] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
__magic_name__: Tuple = {
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = ['''input_ids''', '''attention_mask''']
lowercase__ : List[int] = []
lowercase__ : List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
__magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__ : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__magic_name__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__magic_name__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__magic_name__ : List[Any] = 1
__magic_name__ : Dict = len(self.sp_model )
__magic_name__ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
__magic_name__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__magic_name__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__magic_name__ : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__magic_name__ : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Any = self.lang_code_to_id[self._src_lang]
__magic_name__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Any:
__magic_name__ : List[Any] = self.__dict__.copy()
__magic_name__ : int = None
__magic_name__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : Any = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __magic_name__ ( self ) -> str:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
__magic_name__ : Optional[int] = [1] * len(self.prefix_tokens )
__magic_name__ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__ : Dict = src_lang
__magic_name__ : List[Any] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Tuple = tgt_lang_id
return inputs
def __magic_name__ ( self ) -> int:
__magic_name__ : str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__magic_name__ : List[str] = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
__magic_name__ : List[str] = src_lang
__magic_name__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : str = [self.cur_lang_code]
__magic_name__ : List[Any] = [self.eos_token_id]
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] = [self.cur_lang_code]
__magic_name__ : Union[str, Any] = [self.eos_token_id]
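# Hedged usage sketch of the tokenizer above (upstream name: NllbTokenizer;
# the SentencePiece model path below is a placeholder, not a real file):
# tok = NllbTokenizer("sentencepiece.bpe.model", src_lang="eng_Latn", tgt_lang="fra_Latn")
# batch = tok("UN Chief says there is no military solution in Syria")
# tok.set_tgt_lang_special_tokens("fra_Latn")  # switch the suffix/prefix tokens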
| 342 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
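# With the lazy structure above, importing from this package stays cheap until
# a symbol is actually touched, e.g. (sketch):
# from transformers.models.cpmant import CpmAntConfig, CpmAntTokenizer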
| 342 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Convert a TensorFlow MobileBERT checkpoint into a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
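    # Example invocation (all paths are placeholders, not real files):
    # python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #   --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
    #   --mobilebert_config_file ./mobilebert/config.json \
    #   --pytorch_dump_path ./mobilebert-pytorch/pytorch_model.bin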
| 342 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's token list."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index, clustering it with near-duplicates already seen."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters: minhash every file, then query the LSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative per group of near-duplicates inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared in a multiprocessing pool, sharing
    the dataset through a module-level global to avoid pickling it."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate `dataset`, keeping only the 'extreme' files of each duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f'Original dataset size: {len(dataset)}')
    print(f'Number of duplicate clusters: {len(duplicate_clusters)}')
    print(f'Files in duplicate cluster: {len(duplicate_indices)}')
    print(f'Unique files in duplicate cluster: {len(extreme_dict)}')
    print(f'Filtered dataset size: {len(ds_filter)}')

    return ds_filter, duplicate_clusters
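# Sketch of how the entry point above is typically driven (the dataset source
# is illustrative; the code only requires "content", "repo_name" and "path"
# columns):
# from datasets import load_dataset
# ds = load_dataset("json", data_files="files.jsonl", split="train")
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)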
| 342 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__: Optional[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__magic_name__: List[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
__magic_name__ : Any = bs[:]
__magic_name__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
__magic_name__ : List[str] = [chr(_A ) for n in cs]
return dict(zip(_A, _A ) )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : List[Any] = char
return pairs
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
__magic_name__ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__magic_name__ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
__magic_name__ : Any = {v: k for k, v in self.encoder.items()}
__magic_name__ : Tuple = errors # how to handle errors in decoding
__magic_name__ : Tuple = bytes_to_unicode()
__magic_name__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__magic_name__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : str = {}
__magic_name__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __magic_name__ ( self ) -> Optional[Any]:
return len(self.encoder )
def __magic_name__ ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if token in self.cache:
return self.cache[token]
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__magic_name__ : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : List[str] = bigram
__magic_name__ : Any = []
__magic_name__ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : str = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Optional[Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(lowerCAmelCase__ )
__magic_name__ : Optional[int] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : List[str] = get_pairs(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
__magic_name__ : str = word
return word
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : str = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__magic_name__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : Optional[Any] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : Optional[int] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
__magic_name__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : Dict = [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__magic_name__ : List[Any] = """ """ + text
return (text, kwargs)
| 342 |
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if `phone` is a valid Indian mobile number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
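    # A few hand-checked cases:
    # indian_phone_validator("+918827897895") -> True
    # indian_phone_validator("9876543210") -> True
    # indian_phone_validator("12345") -> False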
| 342 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__magic_name__: Union[str, Any] = logging.getLogger()
def get_setup_file():
    """Return the value of the `-f` argument passed by the test launcher."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 342 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two 1-D signals using a circulant
    matrix built from rotations of the second signal."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
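    # Worked check for the hard-coded signals: circularly convolving
    # [2, 1, 2, -1] with [1, 2, 3, 4] gives [10, 10, 6, 14]
    # (e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10).
    print(CircularConvolution().circular_convolution())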
| 342 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__magic_name__: Tuple = logging.get_logger(__name__)
__magic_name__: Optional[Any] = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
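# Minimal usage sketch of the config above (all arguments are optional):
# config = SwinConfig(image_size=192, window_size=6)
# config.num_layers  # -> 4, derived from len(depths)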
| 342 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
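    # Hand check: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375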
| 342 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(ctbi: str, i: int) -> str:
    return "".join(secrets.choice(ctbi) for _ in range(i))


def random_number(ctbi: str, i: int) -> str:
    pass  # Put your code here...


def random_letters(ctbi: str, i: int) -> str:
    pass  # Put your code here...


def random_characters(ctbi: str, i: int) -> str:
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check whether a given password is strong enough."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, max_length))
    print("[If you are thinking of using this password, you better save it.]")
if __name__ == "__main__":
main()
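# Example strength check for a generated password (illustrative):
# pwd = password_generator(12)
# is_strong_password(pwd)  # usually True for length >= 12, but not guaranteed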
| 342 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
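    # e.g. longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    # returns [10, 22, 33, 41, 60, 80]
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))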
| 342 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__magic_name__: Dict = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Non-recursive segment tree over `arr`, combining elements with `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set arr[p] = v and recompute the affected internal nodes."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine arr[l..r] (inclusive bounds) with the tree's function."""
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test all possible segments against a reduce-based reference."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
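    # Minimal direct-usage check of the class above:
    demo_tree = SegmentTree([5, 2, 8], min)
    assert demo_tree.query(0, 2) == 2
    demo_tree.update(1, 9)
    assert demo_tree.query(0, 2) == 5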
| 342 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")


def sort_auto_mapping(fname, overwrite: bool = False):
    """Sort the identifier blocks of every auto mapping found in `fname`."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    """Sort every mapping in the auto module; raise when not overwriting and changes are needed."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f'The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix'
            " this."
        )
if __name__ == "__main__":
__magic_name__: List[str] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__magic_name__: List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
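    # Minimal illustration (not from the original script): blocks are ordered by
    # the first quoted identifier, the same key `_re_identifier` extracts above.
    _demo_blocks = ['    ("bert", "BertModel"),', '    ("albert", "AlbertModel"),']
    print(sorted(_demo_blocks, key=lambda b: _re_identifier.search(b).groups()[0]))
    # -> the "albert" entry sorts before the "bert" entry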
| 342 | 1 |
from __future__ import annotations
from math import pi
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
__magic_name__: str = [0, 2, 4, 6, 8]
__magic_name__: Optional[int] = [1, 3, 5, 7, 9]
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__magic_name__ : List[Any] = 0
for digit in range(10 ):
__magic_name__ : Optional[int] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, _A, _A )
return result
__magic_name__ : str = 0
for digita in range(10 ):
__magic_name__ : Optional[Any] = digita
if (remainder + digita) % 2 == 0:
__magic_name__ : Tuple = ODD_DIGITS
else:
__magic_name__ : str = EVEN_DIGITS
        for digitb in other_parity_digits:
            __magic_name__ : Tuple = digitb
            result += reversible_numbers(
                remaining_length - 2, (remainder + digita + digitb) // 10, _A, _A, )
return result
def UpperCamelCase ( _A = 9 ):
"""simple docstring"""
__magic_name__ : List[str] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(_A, 0, [0] * length, _A )
return result
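# Cross-check sketch (illustrative, not part of the original solution): Project
# Euler 145 states there are 120 reversible numbers below one thousand, so a
# small brute force should agree with solution(3).
def _brute_force(limit: int) -> int:
    count = 0
    for n in range(1, limit):
        if n % 10 == 0:  # reverse(n) would have a leading zero
            continue
        if all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1]))):
            count += 1
    return count
# _brute_force(10**3) == 120 == solution(3)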
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( _A, _A=7 ):
"""simple docstring"""
__magic_name__ : Any = None
if token is not None:
__magic_name__ : Tuple = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
# The id of a workflow (not of a workflow run)
__magic_name__ : Any = """636036"""
__magic_name__ : Dict = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
__magic_name__ : List[Any] = requests.get(_A, headers=_A ).json()
return result["workflow_runs"]
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Optional[int] = get_daily_ci_runs(_A )
__magic_name__ : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
__magic_name__ : int = workflow_run["""id"""]
break
return workflow_run_id
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = get_last_daily_ci_runs(_A )
if workflow_run_id is not None:
__magic_name__ : Optional[Any] = get_artifacts_links(worflow_run_id=_A, token=_A )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
__magic_name__ : Optional[Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=_A, artifact_url=_A, output_dir=_A, token=_A )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
get_last_daily_ci_artifacts(_A, _A, _A )
__magic_name__ : List[str] = {}
for artifact_name in artifact_names:
__magic_name__ : Dict = os.path.join(_A, f'{artifact_name}.zip' )
if os.path.isfile(_A ):
__magic_name__ : Union[str, Any] = {}
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
with z.open(_A ) as f:
__magic_name__ : Tuple = f.read().decode("""UTF-8""" )
return results
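# Shape note: the mapping returned above is {artifact_name: {member_filename:
# decoded text}}, built only from artifacts that were linked in the workflow run
# and downloaded as `<artifact_name>.zip` into the output directory.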
| 342 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
__magic_name__ : int = sorted(string.lower() )
return len(_A ) == len(set(_A ) )
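# Examples: is_isogram("Uncopyrightable") -> True (15 distinct letters);
# is_isogram("isograms") -> False ("s" repeats). Case is ignored via lower().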
if __name__ == "__main__":
__magic_name__: Dict = input("Enter a string ").strip()
__magic_name__: Union[str, Any] = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 342 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = ['''image_processor''', '''tokenizer''']
lowercase__ : Union[str, Any] = '''AutoImageProcessor'''
lowercase__ : Optional[Any] = '''AutoTokenizer'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[str] = self.image_processor
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[int]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__magic_name__ : Tuple = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
__magic_name__ : str = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
__magic_name__ : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
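    # Note: with both text and images, the tokenizer encoding is returned with
    # `pixel_values` attached; with images only, a BatchEncoding is built from
    # the image-processor features alone.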
def __magic_name__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__ ( self ) -> str:
return ["input_ids", "attention_mask", "pixel_values"]
| 342 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 342 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case__ :
lowercase__ : int
lowercase__ : Node | None = None
lowercase__ : Node | None = None
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = Node(1 )
__magic_name__ : int = Node(2 )
__magic_name__ : Any = Node(3 )
__magic_name__ : List[Any] = Node(4 )
__magic_name__ : Optional[Any] = Node(5 )
return tree
def UpperCamelCase ( _A ):
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCamelCase ( _A ):
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCamelCase ( _A ):
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCamelCase ( _A ):
"""simple docstring"""
return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : list[Any] = []
if root is None:
return output
__magic_name__ : str = deque([root] )
while process_queue:
__magic_name__ : Union[str, Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : list[Any] = []
def populate_output(_A, _A ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left, level - 1 )
populate_output(root.right, level - 1 )
populate_output(_A, _A )
return output
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : list[Any] = []
def populate_output(_A, _A ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right, level - 1 )
populate_output(root.left, level - 1 )
populate_output(_A, _A )
return output
def UpperCamelCase ( _A ):
"""simple docstring"""
if root is None:
return []
__magic_name__ : list[Sequence[Node | None]] = []
__magic_name__ : str = 0
__magic_name__ : Tuple = height(_A )
for h in range(1, height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_A, _A ) )
__magic_name__ : str = 1
else:
output.append(get_nodes_from_right_to_left(_A, _A ) )
__magic_name__ : Optional[int] = 0
return output
def UpperCamelCase ( ): # Main function for testing.
"""simple docstring"""
__magic_name__ : str = make_tree()
print(f'In-order Traversal: {inorder(_A )}' )
print(f'Pre-order Traversal: {preorder(_A )}' )
print(f'Post-order Traversal: {postorder(_A )}', """\n""" )
print(f'Height of Tree: {height(_A )}', """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_A ), """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1, height(_A ) + 1 ):
print(f'Level {level}:', get_nodes_from_left_to_right(_A, level=_A ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 342 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 32 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=3 , ) -> Union[str, Any]:
__magic_name__ : str = parent
__magic_name__ : Dict = do_resize
__magic_name__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_88}
__magic_name__ : Union[str, Any] = size_divisor
__magic_name__ : Union[str, Any] = do_rescale
__magic_name__ : Dict = rescale_factor
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : List[str] = do_center_crop
__magic_name__ : Tuple = image_mean
__magic_name__ : Tuple = image_std
__magic_name__ : Tuple = do_pad
__magic_name__ : int = batch_size
__magic_name__ : List[Any] = num_channels
__magic_name__ : int = min_resolution
__magic_name__ : str = max_resolution
def __magic_name__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
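        # Mirrors the processor's resizing: scale so the shortest edge equals
        # `size`, cap the longest edge at (13_33 / 8_00) * size, then round both
        # sides down to a multiple of `size_divisor`.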
if not batched:
__magic_name__ : Dict = self.size["""shortest_edge"""]
__magic_name__ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__magic_name__ ,__magic_name__ : List[Any] = image.size
else:
__magic_name__ ,__magic_name__ : Dict = image.shape[1], image.shape[2]
__magic_name__ : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
__magic_name__ ,__magic_name__ : str = size, scale * w
else:
__magic_name__ ,__magic_name__ : Optional[Any] = scale * h, size
__magic_name__ : Tuple = int((13_33 / 8_00) * size )
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
__magic_name__ : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = newh * scale
__magic_name__ : Any = neww * scale
__magic_name__ ,__magic_name__ : str = int(newh + 0.5 ), int(neww + 0.5 )
__magic_name__ ,__magic_name__ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__magic_name__ : Union[str, Any] = []
for image in image_inputs:
__magic_name__ ,__magic_name__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
__magic_name__ : Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : int = BridgeTowerImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = BridgeTowerImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Any:
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )
def __magic_name__ ( self ) -> Optional[int]:
pass
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> str:
# Initialize image processor
__magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 342 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__: Optional[int] = logging.get_logger(__name__)
__magic_name__: Any = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = '''detr'''
lowercase__ : Union[str, Any] = ['''past_key_values''']
lowercase__ : Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=3 , lowerCAmelCase__=1_00 , lowerCAmelCase__=6 , lowerCAmelCase__=20_48 , lowerCAmelCase__=8 , lowerCAmelCase__=6 , lowerCAmelCase__=20_48 , lowerCAmelCase__=8 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1.0 , lowerCAmelCase__=False , lowerCAmelCase__="sine" , lowerCAmelCase__="resnet50" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , **lowerCAmelCase__ , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__magic_name__ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Optional[int] = backbone_config.get("""model_type""" )
__magic_name__ : List[str] = CONFIG_MAPPING[backbone_model_type]
__magic_name__ : List[str] = config_class.from_dict(lowerCAmelCase__ )
# set timm attributes to None
__magic_name__ ,__magic_name__ ,__magic_name__ : Optional[Any] = None, None, None
__magic_name__ : Any = use_timm_backbone
__magic_name__ : Any = backbone_config
__magic_name__ : Tuple = num_channels
__magic_name__ : Any = num_queries
__magic_name__ : Dict = d_model
__magic_name__ : int = encoder_ffn_dim
__magic_name__ : Optional[int] = encoder_layers
__magic_name__ : int = encoder_attention_heads
__magic_name__ : List[str] = decoder_ffn_dim
__magic_name__ : List[str] = decoder_layers
__magic_name__ : int = decoder_attention_heads
__magic_name__ : Union[str, Any] = dropout
__magic_name__ : int = attention_dropout
__magic_name__ : Dict = activation_dropout
__magic_name__ : List[Any] = activation_function
__magic_name__ : Dict = init_std
__magic_name__ : Union[str, Any] = init_xavier_std
__magic_name__ : int = encoder_layerdrop
__magic_name__ : Union[str, Any] = decoder_layerdrop
__magic_name__ : Dict = encoder_layers
__magic_name__ : Any = auxiliary_loss
__magic_name__ : Any = position_embedding_type
__magic_name__ : List[str] = backbone
__magic_name__ : Union[str, Any] = use_pretrained_backbone
__magic_name__ : List[str] = dilation
# Hungarian matcher
__magic_name__ : Optional[int] = class_cost
__magic_name__ : List[str] = bbox_cost
__magic_name__ : Any = giou_cost
# Loss coefficients
__magic_name__ : str = mask_loss_coefficient
__magic_name__ : Any = dice_loss_coefficient
__magic_name__ : Dict = bbox_loss_coefficient
__magic_name__ : str = giou_loss_coefficient
__magic_name__ : List[str] = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__ ( self ) -> int:
return self.encoder_attention_heads
@property
def __magic_name__ ( self ) -> int:
return self.d_model
@classmethod
def __magic_name__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
return cls(backbone_config=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> Dict[str, any]:
__magic_name__ : List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__magic_name__ : List[Any] = self.backbone_config.to_dict()
__magic_name__ : Union[str, Any] = self.__class__.model_type
return output
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = version.parse('''1.11''' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __magic_name__ ( self ) -> float:
return 1e-5
@property
def __magic_name__ ( self ) -> int:
return 12
| 342 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__: Tuple = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Union[str, Any] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__magic_name__: Optional[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__magic_name__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def UpperCamelCase ( _A, _A, _A = False ):
"""simple docstring"""
if radian_mode:
return [magnitude * cos(_A ), magnitude * sin(_A )]
return [magnitude * cos(radians(_A ) ), magnitude * sin(radians(_A ) )]
def UpperCamelCase ( _A, _A, _A = 10**-1 ):
"""simple docstring"""
__magic_name__ : NDArray[floataa] = cross(_A, _A )
__magic_name__ : float = sum(_A )
return abs(_A ) < eps
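# Note: this checks rotational equilibrium only (the cross products of the force
# and position rows must sum to ~0); the assertions below use force systems whose
# net force is zero as well.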
if __name__ == "__main__":
# Test to check if it works
__magic_name__: Tuple = array(
[
polar_force(7_18.4, 180 - 30),
polar_force(8_79.54, 45),
polar_force(100, -90),
]
)
__magic_name__: NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__magic_name__: Tuple = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
__magic_name__: List[Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__magic_name__: Optional[Any] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
__magic_name__: Optional[Any] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 342 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__: Optional[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__magic_name__: List[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
__magic_name__ : Any = bs[:]
__magic_name__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
__magic_name__ : List[str] = [chr(_A ) for n in cs]
return dict(zip(_A, _A ) )
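# Illustration (not part of the original file): printable bytes map to themselves
# while the rest are shifted above U+0100, e.g. bytes_to_unicode()[ord(" ")] == "Ġ"
# and bytes_to_unicode()[ord("\n")] == "Ċ", which is why GPT-2/BART vocab entries
# for space-prefixed words start with "Ġ".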
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : List[Any] = char
return pairs
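# e.g. get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, the candidate BPE merges.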
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
__magic_name__ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__magic_name__ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
__magic_name__ : Any = {v: k for k, v in self.encoder.items()}
__magic_name__ : Tuple = errors # how to handle errors in decoding
__magic_name__ : Tuple = bytes_to_unicode()
__magic_name__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__magic_name__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : str = {}
__magic_name__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __magic_name__ ( self ) -> Optional[Any]:
return len(self.encoder )
def __magic_name__ ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if token in self.cache:
return self.cache[token]
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
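        # Greedily merge the lowest-ranked (earliest-learned) bigram until no pair
        # remaining in `word` has an entry in self.bpe_ranks.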
while True:
__magic_name__ : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : List[str] = bigram
__magic_name__ : Any = []
__magic_name__ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : str = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Optional[Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(lowerCAmelCase__ )
__magic_name__ : Optional[int] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : List[str] = get_pairs(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
__magic_name__ : str = word
return word
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : str = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__magic_name__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : Optional[Any] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : Optional[int] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
__magic_name__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : Dict = [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__magic_name__ : List[Any] = """ """ + text
return (text, kwargs)
| 342 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__magic_name__: Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , *lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Any:
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Tuple = eval_examples
__magic_name__ : Tuple = post_process_function
__magic_name__ : Dict = quant_trainer_args
__magic_name__ : Dict = 1_28 # default number of calibration samples
def __magic_name__ ( self , lowerCAmelCase__=None ) -> Dict:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
__magic_name__ : Optional[Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
__magic_name__ : List[Any] = self._remove_unused_columns(lowerCAmelCase__ , description="""Calibration""" )
return DataLoader(
lowerCAmelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCAmelCase__ , )
def __magic_name__ ( self , lowerCAmelCase__=None ) -> str:
__magic_name__ : List[Any] = self.train_dataset if calib_dataset is None else calib_dataset
__magic_name__ : List[Any] = self.get_calib_dataloader(lowerCAmelCase__ )
__magic_name__ : Any = self.model
quant_trainer.configure_model(lowerCAmelCase__ , self.quant_trainer_args , calib=lowerCAmelCase__ )
model.eval()
quant_trainer.enable_calibration(lowerCAmelCase__ )
logger.info("""***** Running calibration *****""" )
logger.info(F' Num examples = {self.calib_num}' )
logger.info(F' Batch size = {calib_dataloader.batch_size}' )
for step, inputs in enumerate(lowerCAmelCase__ ):
# Prediction step
__magic_name__ ,__magic_name__ ,__magic_name__ : Optional[int] = self.prediction_step(lowerCAmelCase__ , lowerCAmelCase__ , prediction_loss_only=lowerCAmelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowerCAmelCase__ , self.quant_trainer_args )
__magic_name__ : List[Any] = model
def __magic_name__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = "eval" ) -> List[str]:
__magic_name__ : str = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ : str = self.get_eval_dataloader(lowerCAmelCase__ )
__magic_name__ : Any = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ : Dict = self.compute_metrics
__magic_name__ : Optional[Any] = None
__magic_name__ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ : Any = eval_loop(
lowerCAmelCase__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase__ , )
finally:
__magic_name__ : Union[str, Any] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__magic_name__ : Optional[int] = self.post_process_function(lowerCAmelCase__ , lowerCAmelCase__ , output.predictions )
__magic_name__ : List[Any] = self.compute_metrics(lowerCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
__magic_name__ : Tuple = metrics.pop(lowerCAmelCase__ )
self.log(lowerCAmelCase__ )
else:
__magic_name__ : Optional[int] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase__ )
return metrics
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__ = "test" ) -> Optional[Any]:
__magic_name__ : Optional[int] = self.get_test_dataloader(lowerCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ : Union[str, Any] = self.compute_metrics
__magic_name__ : str = None
__magic_name__ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__magic_name__ : List[Any] = eval_loop(
lowerCAmelCase__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase__ , )
finally:
__magic_name__ : List[str] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ : Optional[int] = self.post_process_function(lowerCAmelCase__ , lowerCAmelCase__ , output.predictions , """predict""" )
__magic_name__ : str = self.compute_metrics(lowerCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
__magic_name__ : List[Any] = metrics.pop(lowerCAmelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__="./" ) -> str:
__magic_name__ : List[str] = self.eval_dataset
__magic_name__ : Optional[Any] = self.get_eval_dataloader(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = next(iter(lowerCAmelCase__ ) )
# saving device - to make it consistent
__magic_name__ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
__magic_name__ : Tuple = tuple(v.to(lowerCAmelCase__ ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
__magic_name__ : Tuple = True
__magic_name__ : Union[str, Any] = self.model.to(lowerCAmelCase__ )
model.eval()
model.float()
__magic_name__ : List[str] = model.module if hasattr(lowerCAmelCase__ , """module""" ) else model
quant_trainer.configure_model(lowerCAmelCase__ , self.quant_trainer_args )
__magic_name__ : List[str] = os.path.join(lowerCAmelCase__ , """model.onnx""" )
logger.info(F'exporting model to {output_model_file}' )
__magic_name__ : List[str] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , export_params=lowerCAmelCase__ , opset_version=13 , do_constant_folding=lowerCAmelCase__ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=lowerCAmelCase__ , )
logger.info("""onnx export finished""" )
| 342 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[32, 64, 1_28] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ) -> str:
__magic_name__ : Optional[int] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Optional[int] = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : str = embed_dim
__magic_name__ : int = hidden_sizes
__magic_name__ : Union[str, Any] = depths
__magic_name__ : List[str] = num_heads
__magic_name__ : str = window_size
__magic_name__ : Optional[Any] = mlp_ratio
__magic_name__ : Dict = qkv_bias
__magic_name__ : Dict = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : int = use_absolute_embeddings
__magic_name__ : Dict = patch_norm
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[int] = is_training
__magic_name__ : Optional[Any] = scope
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = encoder_stride
__magic_name__ : List[Any] = out_features
__magic_name__ : Union[str, Any] = out_indices
def __magic_name__ ( self ) -> str:
__magic_name__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Any = FocalNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : int = self.type_sequence_label_size
__magic_name__ : Tuple = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> int:
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
__magic_name__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : Any = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Dict = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = FocalNetModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> List[str]:
return
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : str = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# FocalNet has a different seq_length
__magic_name__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
__magic_name__ : Union[str, Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__magic_name__ : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = 3
__magic_name__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
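        # note: when a dimension is already divisible by the patch size, this formula still pads it by one full patch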
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : str = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = FocalNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.default_image_processor
__magic_name__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__magic_name__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__ : Optional[int] = FocalNetConfig
lowercase__ : Dict = False
def __magic_name__ ( self ) -> int:
__magic_name__ : Dict = FocalNetModelTester(self )
| 342 | 1 |
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
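# A sketch of an equivalent iterative traversal (not part of the original file):
# an explicit stack replaces the recursion, avoiding Python's recursion limit on
# deep graphs. For the graph above it prints the same order: 0 1 2 3.
def dfs_iterative(graph: Graph, start_vertex: int) -> None:
    visited = [False] * len(graph.vertex)
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if visited[vertex]:
            continue
        visited[vertex] = True
        print(vertex, end=" ")
        # push neighbours in reverse so they are visited in insertion order
        stack.extend(reversed(graph.vertex.get(vertex, [])))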
| 342 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]:
__magic_name__ : int = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = image_size
__magic_name__ : str = num_channels
__magic_name__ : Dict = patch_size
__magic_name__ : Tuple = num_frames
__magic_name__ : List[Any] = is_training
__magic_name__ : List[Any] = use_labels
__magic_name__ : Dict = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Tuple = attention_type
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[Any] = scope
__magic_name__ : Tuple = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__magic_name__ : str = (image_size // patch_size) ** 2
__magic_name__ : Any = (num_frames) * self.num_patches_per_frame + 1
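        # e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame, so 2 * 25 + 1 = 51 tokens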
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> str:
__magic_name__ : Dict = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__magic_name__ : Optional[Any] = self.num_labels
return config
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
__magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
# verify the logits shape
__magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs
__magic_name__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Tuple = False
lowercase__ : Any = False
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : List[Any] = TimesformerModelTester(self )
__magic_name__ : List[str] = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]:
__magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Optional[int] = [*signature.parameters.keys()]
__magic_name__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Optional[int]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
if not self.has_attentions:
pass
else:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
for model_class in self.all_model_classes:
__magic_name__ : Tuple = self.model_tester.seq_length
__magic_name__ : int = self.model_tester.num_frames
__magic_name__ : Any = True
__magic_name__ : Tuple = False
__magic_name__ : Optional[int] = True
__magic_name__ : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : List[str] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[Any] = True
__magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : int = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
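            # e.g. with the tester defaults seq_len is 51 over 2 frames, so each per-head attention map is 26 x 26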
__magic_name__ : Union[str, Any] = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : Optional[Any] = True
__magic_name__ : int = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
__magic_name__ : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self ) -> Any:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" )
__magic_name__ : List[str] = np.load(_A )
return list(_A )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowerCAmelCase__ )
__magic_name__ : str = self.default_image_processor
__magic_name__ : Any = prepare_video()
__magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 342 | 1 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the sifted key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
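
# Back-of-the-envelope check (a sketch, not part of the protocol above): each of the
# 6 * key_len qubits survives sifting with probability ~0.5, since Alice's and Bob's
# bases are independent fair coin flips, so ~3 * key_len sifted bits remain on
# average and truncation (rather than zero-padding) is the common case.
def expected_sifted_bits(key_len: int = 8) -> float:
    return 6 * key_len * 0.5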
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 342 |
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
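# For the adjacency list above this prints 5: one longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, which visits five vertices.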
| 342 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 342 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> str:
__magic_name__ : Tuple = """ylacombe/bark-small"""
__magic_name__ : List[str] = tempfile.mkdtemp()
__magic_name__ : Optional[Any] = """en_speaker_1"""
__magic_name__ : Union[str, Any] = """This is a test string"""
__magic_name__ : Optional[int] = """speaker_embeddings_path.json"""
__magic_name__ : Any = """speaker_embeddings"""
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[Any] = self.get_tokenizer()
__magic_name__ : int = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__magic_name__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ : str = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__ ( self ) -> Any:
__magic_name__ : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__magic_name__ : Union[str, Any] = 35
__magic_name__ : List[Any] = 2
__magic_name__ : Dict = 8
__magic_name__ : Tuple = {
"""semantic_prompt""": np.ones(lowerCAmelCase__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__magic_name__ : Optional[int] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__magic_name__ : Dict = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__magic_name__ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : str = self.get_tokenizer()
__magic_name__ : Dict = BarkProcessor(tokenizer=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string )
__magic_name__ : List[Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 342 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=[10, 20, 30, 40] , lowerCAmelCase__=[2, 2, 3, 2] , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=["stage2", "stage3", "stage4"] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=None , ) -> Optional[int]:
__magic_name__ : Dict = parent
__magic_name__ : int = batch_size
__magic_name__ : int = image_size
__magic_name__ : str = num_channels
__magic_name__ : Union[str, Any] = num_stages
__magic_name__ : List[str] = hidden_sizes
__magic_name__ : Tuple = depths
__magic_name__ : Optional[int] = is_training
__magic_name__ : Tuple = use_labels
__magic_name__ : Dict = intermediate_size
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : Tuple = num_labels
__magic_name__ : List[Any] = initializer_range
__magic_name__ : Any = out_features
__magic_name__ : str = out_indices
__magic_name__ : List[str] = scope
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : List[Any] = None
if self.use_labels:
__magic_name__ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
__magic_name__ : Dict = ConvNextVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Any = model(lowerCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
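        # e.g. with the tester defaults (batch_size 13, image_size 32, hidden_sizes[-1] == 40) this is (13, 40, 1, 1)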
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
__magic_name__ : Optional[Any] = ConvNextVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
__magic_name__ : str = ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__magic_name__ : Any = None
__magic_name__ : Dict = ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[str] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Any = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Any = config_and_inputs
__magic_name__ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : str = config_and_inputs
__magic_name__ : Dict = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Union[str, Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : List[str] = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Optional[int] = False
lowercase__ : List[str] = False
lowercase__ : Tuple = False
lowercase__ : Optional[int] = False
lowercase__ : Tuple = False
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = ConvNextVaModelTester(self )
__magic_name__ : Any = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> Dict:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __magic_name__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __magic_name__ ( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __magic_name__ ( self ) -> Any:
pass
def __magic_name__ ( self ) -> Optional[int]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__magic_name__ ,__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
__magic_name__ : Any = True
if model_class.__name__ in [
*get_values(lowerCAmelCase__ ),
*get_values(lowerCAmelCase__ ),
]:
continue
__magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__magic_name__ : Optional[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__magic_name__ : Dict = model(**lowerCAmelCase__ ).loss
loss.backward()
def __magic_name__ ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_with_labels()
__magic_name__ : Optional[Any] = False
__magic_name__ : Optional[Any] = True
if (
model_class.__name__
in [*get_values(lowerCAmelCase__ ), *get_values(lowerCAmelCase__ )]
or not model_class.supports_gradient_checkpointing
):
continue
__magic_name__ : int = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
__magic_name__ : str = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__magic_name__ : Dict = model(**lowerCAmelCase__ ).loss
loss.backward()
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[str] = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : int = [*signature.parameters.keys()]
__magic_name__ : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Optional[int] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Any = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = ConvNextVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[int]:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(lowerCAmelCase__ )
__magic_name__ : List[str] = self.default_image_processor
__magic_name__ : List[Any] = prepare_img()
__magic_name__ : str = preprocessor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : str = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 342 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ) -> Optional[int]:
__magic_name__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
__magic_name__ : str = parent
__magic_name__ : Any = batch_size
__magic_name__ : Any = num_channels
__magic_name__ : List[str] = image_size
__magic_name__ : Tuple = min_resolution
__magic_name__ : Union[str, Any] = max_resolution
__magic_name__ : List[str] = do_resize
__magic_name__ : Optional[Any] = size
__magic_name__ : Optional[Any] = do_normalize
__magic_name__ : Any = image_mean
__magic_name__ : List[str] = image_std
def __magic_name__ ( self ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Dict = DPTImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__magic_name__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __magic_name__ ( self ) -> str:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : int = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 342 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__: Any = logging.get_logger(__name__)
__magic_name__: int = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Any = '''poolformer'''
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=16 , lowerCAmelCase__=3 , lowerCAmelCase__=4.0 , lowerCAmelCase__=[2, 2, 6, 2] , lowerCAmelCase__=[64, 1_28, 3_20, 5_12] , lowerCAmelCase__=[7, 3, 3, 3] , lowerCAmelCase__=[4, 2, 2, 2] , lowerCAmelCase__=[2, 1, 1, 1] , lowerCAmelCase__=4 , lowerCAmelCase__=0.0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=True , lowerCAmelCase__=1e-5 , lowerCAmelCase__=0.0_2 , **lowerCAmelCase__ , ) -> Dict:
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : int = patch_size
__magic_name__ : Any = stride
__magic_name__ : Tuple = padding
__magic_name__ : Dict = pool_size
__magic_name__ : int = hidden_sizes
__magic_name__ : Any = mlp_ratio
__magic_name__ : Tuple = depths
__magic_name__ : Optional[int] = patch_sizes
__magic_name__ : Dict = strides
__magic_name__ : Optional[int] = num_encoder_blocks
__magic_name__ : Optional[int] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : Any = use_layer_scale
__magic_name__ : Any = layer_scale_init_value
__magic_name__ : str = initializer_range
super().__init__(**lowerCAmelCase__ )
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = version.parse('''1.11''' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ ( self ) -> float:
return 2e-3
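
# Minimal usage sketch (assumes the class names exported by transformers,
# PoolFormerConfig / PoolFormerOnnxConfig; values shown are the defaults above):
# config = PoolFormerConfig(num_encoder_blocks=4, hidden_sizes=[64, 128, 320, 512])
# onnx_config = PoolFormerOnnxConfig(config)
# list(onnx_config.inputs)          # -> ["pixel_values"]
# onnx_config.atol_for_validation   # -> 2e-3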
| 342 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__magic_name__: Tuple = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : List[str] = '''facebook/nllb-200-distilled-600M'''
lowercase__ : List[Any] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
lowercase__ : List[str] = '''translator'''
lowercase__ : Optional[Any] = AutoTokenizer
lowercase__ : int = AutoModelForSeqaSeqLM
lowercase__ : List[Any] = LANGUAGE_CODES
lowercase__ : str = ['''text''', '''text''', '''text''']
lowercase__ : Any = ['''text''']
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
__magic_name__ : Tuple = self.lang_to_code[src_lang]
__magic_name__ : Dict = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCAmelCase__ , return_tensors="""pt""" , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.model.generate(**lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase__ )
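
# Usage sketch (assumes the transformers agents API; the tool identifier below is
# an assumption and may differ between versions):
# from transformers import load_tool
# translator = load_tool("translation")
# translator("Hola, ¿cómo estás?", src_lang="Spanish", tgt_lang="English")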
| 342 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim,
        num_attention_heads,
        attention_head_dim,
        dropout=0.0,
        cross_attention_dim=None,
        activation_fn="geglu",
        num_embeds_ada_norm=None,
        attention_bias=False,
        only_cross_attention=False,
        double_self_attention=False,
        upcast_attention=False,
        norm_elementwise_affine=True,
        norm_type="layer_norm",
        final_dropout=False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned
            # during the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size, dim):
        # Sets the chunk size and dimension used for feed-forward chunking
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    " calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16, so upcast before applying it
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16, so upcast before applying it
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        # sigmoid approximation of GELU
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
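# Runnable sketch (illustrative, not part of the library code above): the
# feed-forward chunking trick from BasicTransformerBlock.forward, demonstrated
# standalone. Splitting the sequence dimension into chunks lowers the peak
# activation memory, and the concatenated result matches the unchunked call.
if __name__ == "__main__":
    ff = FeedForward(8, activation_fn="geglu")  # dropout defaults to 0.0, so the output is deterministic
    hidden_states = torch.randn(2, 16, 8)  # (batch, seq_len, dim)
    chunk_size, chunk_dim = 4, 1
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    chunked = torch.cat(
        [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
        dim=chunk_dim,
    )
    assert torch.allclose(chunked, ff(hidden_states), atol=1e-6)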
| 342 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n - 1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 1 -> 3 -> 4 costs 5 + 6 = 11
    print(graph.show_min(0, 3))  # 0 -> 2 -> 3 costs 9 + 7 = 16
| 342 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # normalize both sets of embeddings, then take pairwise dot products
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 342 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components=None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index `pos` (zero-based)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between a and b
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    # returns a square zero-matrix of dimension n x n
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # returns a random matrix of size width x height with integer components between a and b
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
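# Quick usage example of the classes above (expected values in the comments):
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([0, 1, 0])
    print(v * w)  # dot product: 2
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1 * 4 - 2 * 3 = -2
    print(m * Vector([1, 1]))  # (3,7)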
| 342 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__magic_name__: Dict = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
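# Generic, self-contained sketch of the deprecation-shim pattern used above
# (the class names below are invented for illustration): the old name keeps
# working but emits a FutureWarning pointing at its replacement.
if __name__ == "__main__":

    class NewProcessor:
        def __init__(self, size=224):
            self.size = size

    class OldFeatureExtractor(NewProcessor):
        def __init__(self, *args, **kwargs):
            warnings.warn(
                "OldFeatureExtractor is deprecated, use NewProcessor instead.",
                FutureWarning,
            )
            super().__init__(*args, **kwargs)

    OldFeatureExtractor()  # warns on instantiation, then behaves like NewProcessor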
| 342 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
__magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__ : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__magic_name__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
__magic_name__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__magic_name__ : List[Any] = 1
__magic_name__ : Dict = len(self.sp_model )
__magic_name__ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
__magic_name__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__magic_name__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__magic_name__ : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__magic_name__ : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Any = self.lang_code_to_id[self._src_lang]
__magic_name__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Any:
__magic_name__ : List[Any] = self.__dict__.copy()
__magic_name__ : int = None
__magic_name__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : Any = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __magic_name__ ( self ) -> str:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__ : Dict = src_lang
__magic_name__ : List[Any] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Tuple = tgt_lang_id
return inputs
def __magic_name__ ( self ) -> int:
__magic_name__ : str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__magic_name__ : List[str] = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
__magic_name__ : List[str] = src_lang
__magic_name__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : str = [self.cur_lang_code]
__magic_name__ : List[Any] = [self.eos_token_id]
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] = [self.cur_lang_code]
__magic_name__ : Union[str, Any] = [self.eos_token_id]
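# Usage sketch (guarded: it downloads the published NLLB checkpoint from the
# Hub, so it needs network access; the language codes follow the FLORES-200
# convention used by this tokenizer):
if __name__ == "__main__":
    tokenizer = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    print(tokenizer("Hello world").input_ids)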
| 342 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
    def test_variance_type(self):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCAmelCase__ )
    def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase__ )
    def test_clip_sample_range(self):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowerCAmelCase__ )
    def test_prediction_type(self):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
    def test_time_indices(self):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ )
    def test_variance_fixed_small_log(self):
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Tuple = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__magic_name__ : Tuple = scheduler_class(**lowerCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_5_4_9_6_2_5 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_9_9_4_9_8_7 ) ) < 1e-5
    def test_variance_learned_range(self):
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(variance_type="""learned_range""" )
__magic_name__ : Any = scheduler_class(**lowerCAmelCase__ )
__magic_name__ : List[Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCAmelCase__ ) - -1_0.1_7_1_2_7_9_0 < 1e-5
assert scheduler._get_variance(4_87 , predicted_variance=lowerCAmelCase__ ) - -5.7_9_9_8_0_5_2 < 1e-5
assert scheduler._get_variance(9_99 , predicted_variance=lowerCAmelCase__ ) - -0.0_0_1_0_0_1_1 < 1e-5
    def test_full_loop(self):
__magic_name__ : int = self.scheduler_classes[0]
__magic_name__ : str = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
__magic_name__ : Tuple = scheduler.timesteps
__magic_name__ : Tuple = self.dummy_model()
__magic_name__ : Optional[int] = self.dummy_sample_deter
__magic_name__ : Tuple = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Any = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
__magic_name__ : Optional[Any] = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
__magic_name__ : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3
    def test_full_loop_skip_timesteps(self):
__magic_name__ : Any = self.scheduler_classes[0]
__magic_name__ : int = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(25 )
__magic_name__ : str = scheduler.timesteps
__magic_name__ : Any = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
if i + 1 == timesteps.shape[0]:
__magic_name__ : Optional[int] = None
else:
__magic_name__ : Tuple = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3
def __magic_name__ ( self ) -> Any:
pass
def __magic_name__ ( self ) -> int:
pass
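# Side sketch: the loops above rely on generator-seeded determinism — the
# same seed must yield identical noise across runs for the tolerances to hold.
if __name__ == "__main__":
    g1 = torch.Generator().manual_seed(0)
    g2 = torch.Generator().manual_seed(0)
    assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))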
| 342 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
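    # Example invocation (script filename and all paths are placeholders):
    #
    #   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./mobilebert-pytorch/pytorch_model.bin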
| 342 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__magic_name__: List[Any] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
__magic_name__ : Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Optional[int] = """A painting of a squirrel eating a burger """
__magic_name__ : int = torch.manual_seed(0 )
__magic_name__ : Optional[int] = pipe(
prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__ )
__magic_name__ : str = VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = generator.manual_seed(0 )
__magic_name__ : Any = pipe(
prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            """shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Tuple = """A painting of a squirrel eating a burger """
__magic_name__ : List[Any] = torch.manual_seed(0 )
__magic_name__ : Optional[int] = pipe(
prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__magic_name__ : int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__magic_name__ : Union[str, Any] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 342 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# fmt: off
__magic_name__ : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__magic_name__ : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __magic_name__ ( self ) -> str:
pass
    def test_add_special_tokens(self):
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Dict = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : List[str] = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
__magic_name__ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
    def test_internal_consistency(self):
__magic_name__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ ,__magic_name__ : Optional[Any] = self.get_input_output_texts(lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
__magic_name__ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
__magic_name__ : Optional[int] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase__ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __magic_name__ ( self ) -> Tuple:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def test_pretokenized_inputs(self):
pass
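# Minimal sketch of the character-level round trip the tests above exercise,
# using a plain dict in place of the tokenizer:
if __name__ == "__main__":
    vocab = {ch: i for i, ch in enumerate("abcdefghijklmnopqrstuvwxyz")}
    ids = [vocab[c] for c in "tester"]
    inv = {i: ch for ch, i in vocab.items()}
    assert "".join(inv[i] for i in ids) == "tester"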
| 342 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__magic_name__: List[str] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
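# Standalone sketch of the `to_dict` behavior above: nested config objects are
# flattened into plain dicts so the arguments stay JSON-serializable. The
# `_Cfg` class below is a stand-in invented for illustration.
if __name__ == "__main__":
    import json

    class _Cfg:
        def to_dict(self):
            return {"max_length": 128}

    d = {"generation_config": _Cfg(), "generation_num_beams": 4}
    d = {k: (v.to_dict() if isinstance(v, _Cfg) else v) for k, v in d.items()}
    print(json.dumps(d))  # {"generation_config": {"max_length": 128}, "generation_num_beams": 4}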
| 342 |
import re
def indian_phone_validator(phone: str) -> bool:
    # optional +91 prefix (with - or space), optional leading 0 or 91, then a
    # 10-digit number starting with 7, 8 or 9
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
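    # A few more checks covering the optional +91 / 0 / 91 prefixes in the pattern:
    assert indian_phone_validator("9876543210")
    assert indian_phone_validator("+91-9876543210")
    assert not indian_phone_validator("12345")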
| 342 | 1 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
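    # Example invocation (script filename and all paths are placeholders):
    #
    #   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
    #       --task WTQ \
    #       --reset_position_index_per_cell \
    #       --tf_checkpoint_path ./tapas/model.ckpt \
    #       --tapas_config_file ./tapas/config.json \
    #       --pytorch_dump_path ./tapas-pytorch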
| 342 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fill the smaller signal with zeros so both signals have the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # round off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
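# Worked example: circularly convolving [2, 1, 2, -1] with [1, 2, 3, 4] via the
# matrix construction above yields [10.0, 10.0, 6.0, 14.0].
if __name__ == "__main__":
    print(CircularConvolution().circular_convolution())  # [10.0, 10.0, 6.0, 14.0]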
| 342 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def __magic_name__ ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
if os.path.isfile(lowerCAmelCase__ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
__magic_name__ : List[Any] = os.path.join(lowerCAmelCase__ , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(lowerCAmelCase__ )
return super().save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __magic_name__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : str = AutoTokenizer.from_pretrained(lowerCAmelCase__ , subfolder="""qformer_tokenizer""" )
__magic_name__ : str = cls._get_arguments_from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
args.append(lowerCAmelCase__ )
return cls(*lowerCAmelCase__ )
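# Minimal usage sketch (illustrative only; the checkpoint name is an
# assumption, not something this module pins down):
#
#   processor = snake_case__.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is shown here?", return_tensors="pt")
#
# The returned BatchFeature carries pixel values plus both the language-model
# and the QFormer input ids/attention masks prepared in __call__ above.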
| 342 |
from math import factorial
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(_A, _A ) or not isinstance(_A, _A ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
__magic_name__ : int = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__magic_name__ : Any = float(factorial(_A ) )
coefficient /= factorial(_A ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
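    # Hand check of the printed value: C(4, 2) * 0.75**2 * 0.25**2
    # = 6 * 0.5625 * 0.0625 = 0.2109375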
| 342 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=4 , ) -> Optional[int]:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : List[str] = batch_size
__magic_name__ : List[str] = seq_length
__magic_name__ : Optional[int] = is_training
__magic_name__ : List[Any] = use_attention_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : List[Any] = use_labels
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Any = hidden_size
__magic_name__ : Optional[Any] = num_hidden_layers
__magic_name__ : Optional[int] = num_attention_heads
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : Union[str, Any] = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : str = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Tuple = type_vocab_size
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : int = num_choices
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_attention_mask:
__magic_name__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCAmelCase__ , )
return config, input_ids, attention_mask
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Tuple = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
__magic_name__ : Dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : List[Any] = FlaxDistilBertModelTester(self )
@slow
def __magic_name__ ( self ) -> str:
for model_class_name in self.all_model_classes:
__magic_name__ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" )
__magic_name__ : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
@require_flax
class snake_case__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Dict:
__magic_name__ : List[str] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__magic_name__ : List[str] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__magic_name__ : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__magic_name__ : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
__magic_name__ : Any = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCAmelCase__ )
__magic_name__ : Any = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
| 342 |
from __future__ import annotations
def UpperCamelCase ( _A ): # This function is recursive
"""simple docstring"""
__magic_name__ : str = len(_A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__magic_name__ : Dict = array[0]
__magic_name__ : Optional[Any] = False
__magic_name__ : Tuple = 1
__magic_name__ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
__magic_name__ : Union[str, Any] = True
__magic_name__ : List[Any] = [element for element in array[i:] if element >= array[i]]
__magic_name__ : Dict = longest_subsequence(_A )
if len(_A ) > len(_A ):
__magic_name__ : Tuple = temp_array
else:
i += 1
__magic_name__ : Any = [element for element in array[1:] if element >= pivot]
__magic_name__ : Dict = [pivot, *longest_subsequence(_A )]
if len(_A ) > len(_A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
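    # Illustrative call (hedged: assumes the public name `longest_subsequence`,
    # matching the recursive calls above):
    #   longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60])
    #   -> a longest non-decreasing subsequence such as [10, 22, 33, 41, 60]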
| 342 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__magic_name__: List[Any] = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__magic_name__: Tuple = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case__ :
def __init__( self ) -> Optional[int]:
__magic_name__ : Tuple = WATERMARK_BITS
__magic_name__ : Tuple = WatermarkEncoder()
self.encoder.set_watermark("""bits""" , self.watermark )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
        # can't encode images narrower than 256 pixels (shape[-1] is the width); return them unchanged
if images.shape[-1] < 2_56:
return images
__magic_name__ : Tuple = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        __magic_name__ : List[Any] = [self.encoder.encode(image , """dwtDct""" ) for image in images]
__magic_name__ : Tuple = torch.from_numpy(np.array(lowerCAmelCase__ ) ).permute(0 , 3 , 1 , 2 )
__magic_name__ : Tuple = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0 )
return images
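# Minimal usage sketch (illustrative; the method name is an assumption, since
# the watermarking method above is obfuscated here):
#
#   watermarker = snake_case__()
#   images = watermarker.apply_watermark(images)  # (batch, 3, h, w) tensors in [-1, 1]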
| 342 |
import argparse
import os
import re
__magic_name__: Optional[Any] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__magic_name__: Any = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
__magic_name__: Tuple = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def UpperCamelCase ( _A, _A = False ):
"""simple docstring"""
with open(_A, """r""", encoding="""utf-8""" ) as f:
__magic_name__ : Any = f.read()
__magic_name__ : List[Any] = content.split("""\n""" )
__magic_name__ : List[str] = []
__magic_name__ : Union[str, Any] = 0
while line_idx < len(_A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__magic_name__ : Any = len(re.search(R"""^(\s*)\S""", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
__magic_name__ : List[Any] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__magic_name__ : List[str] = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__magic_name__ : Union[str, Any] = sorted(_A, key=lambda _A : _re_identifier.search(_A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write("""\n""".join(_A ) )
elif "\n".join(_A ) != content:
return True
def UpperCamelCase ( _A = False ):
"""simple docstring"""
    __magic_name__ : Any = [os.path.join(_A, f ) for f in os.listdir(_A ) if f.endswith(""".py""" )]
    __magic_name__ : List[str] = [sort_auto_mapping(fname, overwrite=_A ) for fname in fnames]
if not overwrite and any(_A ):
__magic_name__ : Optional[Any] = [f for f, d in zip(_A, _A ) if d]
raise ValueError(
f'The following files have auto mappings that need sorting: {", ".join(_A )}. Run `make style` to fix'
""" this.""" )
if __name__ == "__main__":
__magic_name__: List[str] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__magic_name__: List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
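    # Typical invocations (illustrative script path):
    #   python utils/sort_auto_mappings.py               # sort the mappings in place
    #   python utils/sort_auto_mappings.py --check_only  # only report, raising if unsorted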
| 342 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__: Any = logging.get_logger(__name__)
__magic_name__: List[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__magic_name__: int = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__magic_name__: str = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
__magic_name__ : int = bs[:]
__magic_name__ : str = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    __magic_name__ : Union[str, Any] = [chr(n ) for n in cs]
return dict(zip(_A, _A ) )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = set()
__magic_name__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : int = char
return pairs
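# Worked example (illustrative): get_pairs(("l", "o", "w", "e", "r")) returns
# the symbol bigrams {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}.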
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Tuple = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Tuple:
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__magic_name__ : List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__magic_name__ : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__magic_name__ : int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Any = json.load(lowerCAmelCase__ )
__magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
__magic_name__ : Tuple = errors # how to handle errors in decoding
__magic_name__ : int = bytes_to_unicode()
__magic_name__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : int = merges_handle.read().split("""\n""" )[1:-1]
__magic_name__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : Dict = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : Optional[int] = {}
__magic_name__ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : Optional[Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __magic_name__ ( self ) -> int:
return len(self.encoder )
def __magic_name__ ( self ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
if token in self.cache:
return self.cache[token]
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Optional[int] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__magic_name__ : List[str] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : Optional[Any] = bigram
__magic_name__ : int = []
__magic_name__ : Optional[int] = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : Union[str, Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Any = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
__magic_name__ : List[str] = """ """.join(lowerCAmelCase__ )
__magic_name__ : Dict = word
return word
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : str = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__magic_name__ : Dict = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
return bpe_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : Optional[int] = """""".join(lowerCAmelCase__ )
__magic_name__ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : int = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : Optional[Any] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : List[Any] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : int = [self.sep_token_id]
__magic_name__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> str:
__magic_name__ : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__magic_name__ : Any = """ """ + text
return (text, kwargs)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Optional[Any]:
return token_ids_a + [self.eos_token_id]
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[int]:
__magic_name__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
                inputs.append(text )
__magic_name__ : List[Any] = """ """.join(lowerCAmelCase__ )
__magic_name__ : Dict = self.encode(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.model_max_length:
__magic_name__ : List[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
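# Minimal usage sketch (illustrative):
#
#   tokenizer = snake_case__.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello, how are you?").input_ids
#   # build_inputs_with_special_tokens above appends eos_token_id, so `ids`
#   # always ends with the EOS id.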
| 342 |
__magic_name__: str = [0, 2, 4, 6, 8]
__magic_name__: Optional[int] = [1, 3, 5, 7, 9]
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__magic_name__ : List[Any] = 0
for digit in range(10 ):
__magic_name__ : Optional[int] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, _A, _A )
return result
__magic_name__ : str = 0
for digita in range(10 ):
__magic_name__ : Optional[Any] = digita
if (remainder + digita) % 2 == 0:
__magic_name__ : Tuple = ODD_DIGITS
else:
__magic_name__ : str = EVEN_DIGITS
for digita in other_parity_digits:
__magic_name__ : Tuple = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, _A, _A, )
return result
def UpperCamelCase ( _A = 9 ):
"""simple docstring"""
__magic_name__ : List[str] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(_A, 0, [0] * length, _A )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__: List[str] = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Optional[Any] = GPTSwaTokenizer
lowercase__ : Tuple = False
lowercase__ : List[Any] = True
lowercase__ : List[Any] = False
def __magic_name__ ( self ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ : Any = GPTSwaTokenizer(lowerCAmelCase__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
__magic_name__ : Optional[Any] = """This is a test"""
__magic_name__ : Tuple = """This is a test"""
return input_text, output_text
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : List[str] = """<s>"""
__magic_name__ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowerCAmelCase__ ) , 20_00 )
def __magic_name__ ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : List[str] = GPTSwaTokenizer(lowerCAmelCase__ )
__magic_name__ : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
__magic_name__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
lowerCAmelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
__magic_name__ : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
__magic_name__ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
# fmt: off
self.assertListEqual(
lowerCAmelCase__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : str = GPTSwaTokenizer(lowerCAmelCase__ )
__magic_name__ : List[str] = ["""This is a test""", """I was born in 92000, and this is falsé."""]
__magic_name__ : Union[str, Any] = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
# Test that decode_fast returns the input text
for text, token_ids in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Any = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
__magic_name__ : Tuple = {"""input_ids""": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=lowerCAmelCase__ , )
| 342 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
__magic_name__ : int = sorted(string.lower() )
return len(_A ) == len(set(_A ) )
if __name__ == "__main__":
__magic_name__: Dict = input("Enter a string ").strip()
__magic_name__: Union[str, Any] = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 342 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__magic_name__: int = open # noqa: we just need to have a builtin inside this module to test it properly
| 342 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
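# Migration sketch (illustrative; the checkpoint name is an example, not a requirement):
#
#   from diffusers import StableDiffusionInpaintPipeline
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")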
| 342 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Any = path_or_paths if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else {self.split: path_or_paths}
__magic_name__ : int = Text(
cache_dir=lowerCAmelCase__ , data_files=lowerCAmelCase__ , features=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __magic_name__ ( self ) -> Union[str, Any]:
# Build iterable dataset
if self.streaming:
__magic_name__ : Optional[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__magic_name__ : Tuple = None
__magic_name__ : Any = None
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
__magic_name__ : List[Any] = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
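# Minimal usage sketch (illustrative; this reader mirrors the public "text"
# loader in `datasets`):
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")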
| 342 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 32 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=3 , ) -> Union[str, Any]:
__magic_name__ : str = parent
__magic_name__ : Dict = do_resize
__magic_name__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_88}
__magic_name__ : Union[str, Any] = size_divisor
__magic_name__ : Union[str, Any] = do_rescale
__magic_name__ : Dict = rescale_factor
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : List[str] = do_center_crop
__magic_name__ : Tuple = image_mean
__magic_name__ : Tuple = image_std
__magic_name__ : Tuple = do_pad
__magic_name__ : int = batch_size
__magic_name__ : List[Any] = num_channels
__magic_name__ : int = min_resolution
__magic_name__ : str = max_resolution
def __magic_name__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
if not batched:
__magic_name__ : Dict = self.size["""shortest_edge"""]
__magic_name__ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__magic_name__ ,__magic_name__ : List[Any] = image.size
else:
__magic_name__ ,__magic_name__ : Dict = image.shape[1], image.shape[2]
__magic_name__ : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
__magic_name__ ,__magic_name__ : str = size, scale * w
else:
__magic_name__ ,__magic_name__ : Optional[Any] = scale * h, size
__magic_name__ : Tuple = int((13_33 / 8_00) * size )
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
__magic_name__ : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = newh * scale
__magic_name__ : Any = neww * scale
__magic_name__ ,__magic_name__ : str = int(newh + 0.5 ), int(neww + 0.5 )
__magic_name__ ,__magic_name__ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__magic_name__ : Union[str, Any] = []
for image in image_inputs:
__magic_name__ ,__magic_name__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __magic_name__ : Optional[Any] = max(lowerCAmelCase__ , key=lambda item : item[0] )[0]
            __magic_name__ : Tuple = max(lowerCAmelCase__ , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : int = BridgeTowerImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = BridgeTowerImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Any:
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )
def __magic_name__ ( self ) -> Optional[int]:
pass
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> str:
# Initialize image processor
__magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 342 | 1 |
from ...configuration_utils import PretrainedConfig
__magic_name__: Dict = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : int = '''tapas'''
def __init__( self , lowerCAmelCase__=3_05_22 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10_24 , lowerCAmelCase__=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-1_2 , lowerCAmelCase__=0 , lowerCAmelCase__=1_0.0 , lowerCAmelCase__=0 , lowerCAmelCase__=1.0 , lowerCAmelCase__=None , lowerCAmelCase__=1.0 , lowerCAmelCase__=False , lowerCAmelCase__=None , lowerCAmelCase__=1.0 , lowerCAmelCase__=1.0 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__="ratio" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=64 , lowerCAmelCase__=32 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Dict:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Tuple = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : List[Any] = hidden_act
__magic_name__ : Optional[Any] = intermediate_size
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Any = max_position_embeddings
__magic_name__ : Optional[int] = type_vocab_sizes
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
__magic_name__ : List[Any] = positive_label_weight
__magic_name__ : List[Any] = num_aggregation_labels
__magic_name__ : List[Any] = aggregation_loss_weight
__magic_name__ : Tuple = use_answer_as_supervision
__magic_name__ : Optional[int] = answer_loss_importance
__magic_name__ : str = use_normalized_answer_loss
__magic_name__ : int = huber_loss_delta
__magic_name__ : Dict = temperature
__magic_name__ : List[str] = aggregation_temperature
__magic_name__ : List[str] = use_gumbel_for_cells
__magic_name__ : Any = use_gumbel_for_aggregation
__magic_name__ : int = average_approximation_function
__magic_name__ : Tuple = cell_selection_preference
__magic_name__ : List[str] = answer_loss_cutoff
__magic_name__ : Any = max_num_rows
__magic_name__ : str = max_num_columns
__magic_name__ : Union[str, Any] = average_logits_per_cell
__magic_name__ : Optional[Any] = select_one_column
__magic_name__ : Union[str, Any] = allow_empty_column_selection
__magic_name__ : Union[str, Any] = init_cell_selection_weights_to_zero
__magic_name__ : Tuple = reset_position_index_per_cell
__magic_name__ : str = disable_per_token_loss
# Aggregation hyperparameters
__magic_name__ : Optional[int] = aggregation_labels
__magic_name__ : Dict = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowerCAmelCase__ ):
            __magic_name__ : Dict = {int(k ): v for k, v in aggregation_labels.items()}
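# Minimal usage sketch (illustrative; keyword names follow the upstream Tapas
# config, not the obfuscated signature above):
#
#   config = snake_case__(num_aggregation_labels=4, use_answer_as_supervision=True)
#   config.save_pretrained("./tapas-wtq-config")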
| 342 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__: Tuple = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Union[str, Any] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__magic_name__: Optional[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__magic_name__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__magic_name__: Tuple = logging.get_logger(__name__)
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
__magic_name__ : int = (output_size, output_size) if isinstance(_A, _A ) else output_size
__magic_name__ ,__magic_name__ : Tuple = get_image_size(_A )
__magic_name__ ,__magic_name__ : Union[str, Any] = output_size
# determine new height and width
__magic_name__ : int = output_height / input_height
__magic_name__ : Tuple = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__magic_name__ : List[str] = scale_width
else:
# fit height
__magic_name__ : Union[str, Any] = scale_height
__magic_name__ : Optional[int] = constraint_to_multiple_of(scale_height * input_height, multiple=_A )
__magic_name__ : int = constraint_to_multiple_of(scale_width * input_width, multiple=_A )
return (new_height, new_width)
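# Worked example: a 480x640 input with output_size=384, keep_aspect_ratio=True
# and multiple=32 gives scale_height=0.8 and scale_width=0.6; 0.8 deviates
# least from 1, so both sides use it and snap to multiples of 32: (384, 512).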
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = ['''pixel_values''']
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = False , lowerCAmelCase__ = 1 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
super().__init__(**lowerCAmelCase__ )
__magic_name__ : int = size if size is not None else {"""height""": 3_84, """width""": 3_84}
__magic_name__ : str = get_size_dict(lowerCAmelCase__ )
__magic_name__ : Optional[int] = do_resize
__magic_name__ : Optional[int] = size
__magic_name__ : int = keep_aspect_ratio
__magic_name__ : Any = ensure_multiple_of
__magic_name__ : List[str] = resample
__magic_name__ : Union[str, Any] = do_rescale
__magic_name__ : List[str] = rescale_factor
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = 1 , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
__magic_name__ : Any = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
__magic_name__ : int = get_resize_output_image_size(
lowerCAmelCase__ , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=lowerCAmelCase__ , multiple=lowerCAmelCase__ , )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ) -> PIL.Image.Image:
__magic_name__ : Any = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Dict = size if size is not None else self.size
__magic_name__ : str = get_size_dict(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__magic_name__ : Union[str, Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__magic_name__ : Tuple = resample if resample is not None else self.resample
__magic_name__ : str = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : Dict = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Any = image_std if image_std is not None else self.image_std
__magic_name__ : Optional[Any] = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
        __magic_name__ : str = [to_numpy_array(image ) for image in images]
        if do_resize:
            __magic_name__ : Dict = [self.resize(image=image , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
        if do_rescale:
            __magic_name__ : Any = [self.rescale(image=image , scale=lowerCAmelCase__ ) for image in images]
        if do_normalize:
            __magic_name__ : str = [self.normalize(image=image , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
        __magic_name__ : Union[str, Any] = [to_channel_dimension_format(image , lowerCAmelCase__ ) for image in images]
__magic_name__ : Tuple = {"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> str:
__magic_name__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCAmelCase__ ):
__magic_name__ : Optional[Any] = target_sizes.numpy()
__magic_name__ : List[str] = []
for idx in range(len(lowerCAmelCase__ ) ):
__magic_name__ : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowerCAmelCase__ )
__magic_name__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
__magic_name__ : Tuple = logits.argmax(dim=1 )
__magic_name__ : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
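# Minimal usage sketch (illustrative; calling the processor routes through the
# preprocess method defined above):
#
#   from PIL import Image
#   image_processor = snake_case__(size={"height": 384, "width": 384})
#   inputs = image_processor(images=Image.open("photo.jpg"), return_tensors="pt")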
| 342 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__: Optional[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__magic_name__: List[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
__magic_name__ : Any = bs[:]
__magic_name__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    __magic_name__ : List[str] = [chr(n ) for n in cs]
return dict(zip(_A, _A ) )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : List[Any] = char
return pairs
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
__magic_name__ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__magic_name__ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
__magic_name__ : Any = {v: k for k, v in self.encoder.items()}
__magic_name__ : Tuple = errors # how to handle errors in decoding
__magic_name__ : Tuple = bytes_to_unicode()
__magic_name__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__magic_name__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : str = {}
__magic_name__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __magic_name__ ( self ) -> Optional[Any]:
return len(self.encoder )
def __magic_name__ ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if token in self.cache:
return self.cache[token]
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__magic_name__ : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : List[str] = bigram
__magic_name__ : Any = []
__magic_name__ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : str = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Optional[Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(lowerCAmelCase__ )
__magic_name__ : Optional[int] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : List[str] = get_pairs(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
__magic_name__ : str = word
return word
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : str = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__magic_name__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : Optional[Any] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : Optional[int] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
__magic_name__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : Dict = [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__magic_name__ : List[Any] = """ """ + text
return (text, kwargs)
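# --- Illustration (not part of the original file above): a self-contained toy version of
# the greedy merge loop that the `bpe` method implements. The merge table below is made up
# for the example; a real one comes from merges.txt, lowest rank merging first.
def toy_bpe(word, ranks):
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:
            break  # no learned merge applies anymore
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])  # apply the best-ranked merge
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return " ".join(symbols)

print(toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}))  # -> "low er"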
| 342 | 1 |
from PIL import Image
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
def brightness(_A ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(_A )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
__magic_name__: str = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
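# --- Illustration (not part of the original file above): Image.point calls the given
# function once per possible channel value to build a lookup table, and Pillow keeps the
# results in the 8-bit range. The sketch below makes the clamping explicit and uses an
# in-memory image so nothing has to exist on disk.
from PIL import Image

def change_brightness_explicit(img, level):
    return img.point(lambda c: max(0, min(255, int(c + level))))  # 128 + level + (c - 128) == c + level

demo = Image.new("RGB", (2, 2), (200, 100, 50))
print(change_brightness_explicit(demo, 100).getpixel((0, 0)))  # (255, 200, 150): red channel clamped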
| 342 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[32, 64, 1_28] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ) -> str:
__magic_name__ : Optional[int] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Optional[int] = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : str = embed_dim
__magic_name__ : int = hidden_sizes
__magic_name__ : Union[str, Any] = depths
__magic_name__ : List[str] = num_heads
__magic_name__ : str = window_size
__magic_name__ : Optional[Any] = mlp_ratio
__magic_name__ : Dict = qkv_bias
__magic_name__ : Dict = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : int = use_absolute_embeddings
__magic_name__ : Dict = patch_norm
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[int] = is_training
__magic_name__ : Optional[Any] = scope
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = encoder_stride
__magic_name__ : List[Any] = out_features
__magic_name__ : Union[str, Any] = out_indices
def __magic_name__ ( self ) -> str:
__magic_name__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Any = FocalNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : int = self.type_sequence_label_size
__magic_name__ : Tuple = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> int:
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
__magic_name__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : Any = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Dict = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = FocalNetModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> List[str]:
return
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : str = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# FocalNet has a different seq_length
__magic_name__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
__magic_name__ : Union[str, Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__magic_name__ : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = 3
__magic_name__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : str = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = FocalNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.default_image_processor
__magic_name__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__magic_name__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__ : Optional[int] = FocalNetConfig
lowercase__ : Dict = False
def __magic_name__ ( self ) -> int:
__magic_name__ : Dict = FocalNetModelTester(self )
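# --- Illustration (not part of the original file above): a minimal inference sketch for
# the checkpoint exercised in the integration test. It assumes network access to download
# "microsoft/focalnet-tiny" and that the COCO fixture image from the test suite exists locally.
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # predicted ImageNet-1k class name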
| 342 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__magic_name__: Optional[int] = datasets.utils.logging.get_logger(__name__)
__magic_name__: List[Any] = ["names", "prefix"]
__magic_name__: int = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
__magic_name__: str = ["encoding_errors", "on_bad_lines"]
__magic_name__: Union[str, Any] = ["date_format"]
@dataclass
class snake_case__ ( datasets.BuilderConfig ):
lowercase__ : str = ","
lowercase__ : Optional[str] = None
lowercase__ : Optional[Union[int, List[int], str]] = "infer"
lowercase__ : Optional[List[str]] = None
lowercase__ : Optional[List[str]] = None
lowercase__ : Optional[Union[int, str, List[int], List[str]]] = None
lowercase__ : Optional[Union[List[int], List[str]]] = None
lowercase__ : Optional[str] = None
lowercase__ : bool = True
lowercase__ : Optional[Literal["c", "python", "pyarrow"]] = None
lowercase__ : Dict[Union[int, str], Callable[[Any], Any]] = None
lowercase__ : Optional[list] = None
lowercase__ : Optional[list] = None
lowercase__ : bool = False
lowercase__ : Optional[Union[int, List[int]]] = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[Union[str, List[str]]] = None
lowercase__ : bool = True
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = True
lowercase__ : Optional[str] = None
lowercase__ : str = "."
lowercase__ : Optional[str] = None
lowercase__ : str = '"'
lowercase__ : int = 0
lowercase__ : Optional[str] = None
lowercase__ : Optional[str] = None
lowercase__ : Optional[str] = None
lowercase__ : Optional[str] = None
lowercase__ : bool = True
lowercase__ : bool = True
lowercase__ : int = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : Optional[str] = None
lowercase__ : int = 10000
lowercase__ : Optional[datasets.Features] = None
lowercase__ : Optional[str] = "strict"
lowercase__ : Literal["error", "warn", "skip"] = "error"
lowercase__ : Optional[str] = None
def __magic_name__ ( self ) -> Optional[Any]:
if self.delimiter is not None:
__magic_name__ : Optional[int] = self.delimiter
if self.column_names is not None:
__magic_name__ : List[Any] = self.column_names
@property
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class snake_case__ ( datasets.ArrowBasedBuilder ):
lowercase__ : List[Any] = CsvConfig
def __magic_name__ ( self ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
__magic_name__ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
__magic_name__ : Tuple = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Optional[int] = [files]
__magic_name__ : Dict = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__magic_name__ : Union[str, Any] = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Tuple = [files]
__magic_name__ : Union[str, Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"""files""": files} ) )
return splits
def __magic_name__ ( self , lowerCAmelCase__ ) -> pa.Table:
if self.config.features is not None:
__magic_name__ : Optional[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
__magic_name__ : int = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__magic_name__ : List[Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__magic_name__ : Union[str, Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
__magic_name__ : Dict = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
__magic_name__ : str = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}' )
raise
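# --- Illustration (not part of the original file above): the builder above is what
# `load_dataset("csv", ...)` dispatches to. A sketch of driving it through the public API;
# "my_data.csv" is a hypothetical local file.
from datasets import load_dataset

dataset = load_dataset(
    "csv",
    data_files={"train": "my_data.csv"},  # hypothetical path
    sep=",",  # forwarded to pandas.read_csv through CsvConfig.pd_read_csv_kwargs
)
print(dataset["train"].features)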
| 342 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]:
__magic_name__ : int = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = image_size
__magic_name__ : str = num_channels
__magic_name__ : Dict = patch_size
__magic_name__ : Tuple = num_frames
__magic_name__ : List[Any] = is_training
__magic_name__ : List[Any] = use_labels
__magic_name__ : Dict = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Tuple = attention_type
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[Any] = scope
__magic_name__ : Tuple = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__magic_name__ : str = (image_size // patch_size) ** 2
__magic_name__ : Any = (num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> str:
__magic_name__ : Dict = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__magic_name__ : Optional[Any] = self.num_labels
return config
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
__magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
# verify the logits shape
__magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs
__magic_name__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Tuple = False
lowercase__ : Any = False
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : List[Any] = TimesformerModelTester(self )
__magic_name__ : List[str] = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]:
__magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Optional[int] = [*signature.parameters.keys()]
__magic_name__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Optional[int]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
if not self.has_attentions:
pass
else:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
for model_class in self.all_model_classes:
__magic_name__ : Tuple = self.model_tester.seq_length
__magic_name__ : int = self.model_tester.num_frames
__magic_name__ : Any = True
__magic_name__ : Tuple = False
__magic_name__ : Optional[int] = True
__magic_name__ : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : List[str] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[Any] = True
__magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : int = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__magic_name__ : Union[str, Any] = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : Optional[Any] = True
__magic_name__ : int = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
__magic_name__ : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self ) -> Any:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" )
__magic_name__ : List[str] = np.load(_A )
return list(_A )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowerCAmelCase__ )
__magic_name__ : str = self.default_image_processor
__magic_name__ : Any = prepare_video()
__magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
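# --- Illustration (not part of the original file above): the same checkpoint through the
# high-level pipeline API. Assumes the "facebook/timesformer-base-finetuned-k400" weights
# can be downloaded, a video decoding backend (e.g. decord) is installed, and
# "eating_spaghetti.mp4" is a hypothetical local file.
from transformers import pipeline

video_classifier = pipeline("video-classification", model="facebook/timesformer-base-finetuned-k400")
print(video_classifier("eating_spaghetti.mp4")[:3])  # top Kinetics-400 predictions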
| 342 | 1 |
from math import factorial
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(_A, _A ) or not isinstance(_A, _A ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
__magic_name__ : int = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__magic_name__ : Any = float(factorial(_A ) )
coefficient /= factorial(_A ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
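# --- Illustration (not part of the original file above): a quick sanity check of the PMF
# using math.comb (Python 3.8+) instead of the explicit factorial ratio. The probabilities
# over every possible success count must sum to 1.
from math import comb

def binomial_pmf(successes, trials, prob):
    return comb(trials, successes) * prob**successes * (1 - prob) ** (trials - successes)

assert abs(sum(binomial_pmf(k, 10, 0.3) for k in range(11)) - 1.0) < 1e-12
print(binomial_pmf(2, 4, 0.75))  # 0.2109375, matching the module's example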
| 342 |
def UpperCamelCase ( _A ):
    """Print the number of vertices on the longest path in a DAG, via Kahn's topological order."""
    __magic_name__ : List[Any] = [0] * len(_A )  # in-degree of every vertex
    __magic_name__ : List[str] = []  # vertices whose in-degree has dropped to zero
    __magic_name__ : List[str] = [1] * len(_A )  # longest path (counted in vertices) ending at each vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(_A ) ):
        if indegree[i] == 0:
            queue.append(_A )
    while queue:
        __magic_name__ : Dict = queue.pop(0 )  # list.pop(0) is O(n); a deque would make this O(1)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                __magic_name__ : int = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(_A )
    print(max(_A ) )
# Adjacency list of Graph
__magic_name__: str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
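# --- Illustration (not part of the original file above): the same algorithm written with
# collections.deque (O(1) pops instead of list.pop(0)) and returning the result instead of
# printing it, checked against the adjacency list above.
from collections import deque

def longest_path_length(dag):
    indegree = {v: 0 for v in dag}
    for targets in dag.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in dag}  # longest path length, counted in vertices
    queue = deque(v for v, deg in indegree.items() if deg == 0)
    while queue:
        vertex = queue.popleft()
        for nxt in dag[vertex]:
            dist[nxt] = max(dist[nxt], dist[vertex] + 1)
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    return max(dist.values())

assert longest_path_length({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5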
| 342 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__: Tuple = logging.get_logger(__name__)
__magic_name__: Any = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
__magic_name__: Optional[int] = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}
__magic_name__: str = "</w>"
__magic_name__: Tuple = "@@ "
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = set()
__magic_name__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : List[str] = char
return pairs
# Speech2Text2 has no max input length
__magic_name__: Dict = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Dict = VOCAB_FILES_NAMES
lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__=False , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
unk_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Optional[int] = do_lower_case
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Tuple = json.load(lowerCAmelCase__ )
__magic_name__ : List[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F'No merges files provided. {self.__class__.__name__} can only be used for decoding.' )
__magic_name__ : Any = None
__magic_name__ : List[str] = None
else:
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : int = merges_handle.read().split("""\n""" )[:-1]
__magic_name__ : List[Any] = [tuple(merge.split()[:2] ) for merge in merges]
__magic_name__ : Dict = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : Optional[Any] = {}
@property
def __magic_name__ ( self ) -> int:
return len(self.decoder )
def __magic_name__ ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : Optional[Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__magic_name__ : Any = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__magic_name__ : List[str] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : str = bigram
__magic_name__ : List[str] = []
__magic_name__ : Tuple = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : List[str] = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Optional[int] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(lowerCAmelCase__ )
__magic_name__ : Tuple = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
__magic_name__ : Any = """ """.join(lowerCAmelCase__ )
if word == "\n " + BPE_TOKEN_MERGES:
__magic_name__ : Optional[int] = """\n""" + BPE_TOKEN_MERGES
if word.endswith(lowerCAmelCase__ ):
__magic_name__ : Dict = word.replace(lowerCAmelCase__ , """""" )
__magic_name__ : Any = word.replace(""" """ , lowerCAmelCase__ )
__magic_name__ : List[Any] = word
return word
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
__magic_name__ : List[str] = text.lower()
__magic_name__ : Optional[int] = text.split()
__magic_name__ : Dict = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(lowerCAmelCase__ ).split(""" """ ) ) )
return split_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
__magic_name__ : int = self.decoder.get(lowerCAmelCase__ , self.unk_token )
return result
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
__magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
# make sure @@ tokens are concatenated
__magic_name__ : Tuple = """""".join(string.split(lowerCAmelCase__ ) )
return string
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : List[str] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : Dict = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return (vocab_file, merges_file)
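# --- Illustration (not part of the original file above): how the "@@ " continuation marker
# is undone by convert_tokens_to_string, as a self-contained re-implementation.
def tokens_to_string(tokens):
    text = " ".join(tokens)
    return "".join(text.split("@@ "))  # drop the marker and glue word pieces back together

print(tokens_to_string(["hel@@", "lo", "wor@@", "ld"]))  # -> "hello world"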
| 342 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> str:
__magic_name__ : Tuple = """ylacombe/bark-small"""
__magic_name__ : List[str] = tempfile.mkdtemp()
__magic_name__ : Optional[Any] = """en_speaker_1"""
__magic_name__ : Union[str, Any] = """This is a test string"""
__magic_name__ : Optional[int] = """speaker_embeddings_path.json"""
__magic_name__ : Any = """speaker_embeddings"""
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[Any] = self.get_tokenizer()
__magic_name__ : int = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__magic_name__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ : str = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__ ( self ) -> Any:
__magic_name__ : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__magic_name__ : Union[str, Any] = 35
__magic_name__ : List[Any] = 2
__magic_name__ : Dict = 8
__magic_name__ : Tuple = {
"""semantic_prompt""": np.ones(lowerCAmelCase__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__magic_name__ : Optional[int] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__magic_name__ : Dict = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__magic_name__ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : str = self.get_tokenizer()
__magic_name__ : Dict = BarkProcessor(tokenizer=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string )
__magic_name__ : List[Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
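# Illustrative sketch (added; not part of the original test file), showing the public
# API these tests exercise. The checkpoint and preset names match the ones used above.
# from transformers import BarkProcessor
# processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
# inputs = processor("This is a test string", voice_preset="en_speaker_1")
# inputs["input_ids"] and inputs["history_prompt"] then feed BarkModel.generate().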
| 342 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__: str = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Union[str, Any] = ["MaskFormerFeatureExtractor"]
__magic_name__: Tuple = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: str = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
__magic_name__: List[str] = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__magic_name__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
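# Illustrative note (added; not in the original file): with _LazyModule, each submodule
# is imported only on first attribute access, so the line below loads just the config
# code and none of the torch-heavy modeling files (assuming a standard install).
# from transformers.models.maskformer import MaskFormerConfig
# config = MaskFormerConfig()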
| 342 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ) -> Optional[int]:
__magic_name__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
__magic_name__ : str = parent
__magic_name__ : Any = batch_size
__magic_name__ : Any = num_channels
__magic_name__ : List[str] = image_size
__magic_name__ : Tuple = min_resolution
__magic_name__ : Union[str, Any] = max_resolution
__magic_name__ : List[str] = do_resize
__magic_name__ : Optional[Any] = size
__magic_name__ : Optional[Any] = do_normalize
__magic_name__ : Any = image_mean
__magic_name__ : List[str] = image_std
def __magic_name__ ( self ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Dict = DPTImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__magic_name__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __magic_name__ ( self ) -> str:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : int = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
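# Illustrative sketch (added; not part of the original test file) of the API under test.
# The 384x384 size is the usual DPT default and is an assumption here, not taken from above.
# from transformers import DPTImageProcessor
# image_processor = DPTImageProcessor(size={"height": 384, "width": 384})
# pixel_values = image_processor(pil_images, return_tensors="pt").pixel_values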
| 342 | 1 |
import logging
import os
from .state import PartialState
class snake_case__ ( logging.LoggerAdapter ):
@staticmethod
def __magic_name__ ( lowerCAmelCase__ ) -> Dict:
__magic_name__ : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
__magic_name__ : Tuple = kwargs.pop("""main_process_only""" , lowerCAmelCase__ )
__magic_name__ : str = kwargs.pop("""in_order""" , lowerCAmelCase__ )
if self.isEnabledFor(lowerCAmelCase__ ):
if self._should_log(lowerCAmelCase__ ):
__magic_name__ ,__magic_name__ : Union[str, Any] = self.process(lowerCAmelCase__ , lowerCAmelCase__ )
self.logger.log(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
elif in_order:
__magic_name__ : Optional[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__magic_name__ ,__magic_name__ : Any = self.process(lowerCAmelCase__ , lowerCAmelCase__ )
self.logger.log(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
state.wait_for_everyone()
def UpperCamelCase ( _A, _A = None ):
"""simple docstring"""
if log_level is None:
__magic_name__ : int = os.environ.get("""ACCELERATE_LOG_LEVEL""", _A )
__magic_name__ : int = logging.getLogger(_A )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_A, {} )
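# Hedged usage sketch (added): the factory above, obfuscated as `UpperCamelCase`, is
# accelerate.logging.get_logger in the de-obfuscated source. Typical use:
# from accelerate import Accelerator
# from accelerate.logging import get_logger
# accelerator = Accelerator()          # initializes PartialState, required by the adapter
# logger = get_logger(__name__, log_level="INFO")
# logger.info("emitted once, on the main process")  # main_process_only defaults to True
# logger.info("emitted by every rank, in order", main_process_only=False, in_order=True)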
| 342 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__magic_name__: Tuple = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : List[str] = '''facebook/nllb-200-distilled-600M'''
lowercase__ : List[Any] = (
        '''This is a tool that translates text from one language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, '''
        '''which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
lowercase__ : List[str] = '''translator'''
lowercase__ : Optional[Any] = AutoTokenizer
lowercase__ : int = AutoModelForSeqaSeqLM
lowercase__ : List[Any] = LANGUAGE_CODES
lowercase__ : str = ['''text''', '''text''', '''text''']
lowercase__ : Any = ['''text''']
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
__magic_name__ : Tuple = self.lang_to_code[src_lang]
__magic_name__ : Dict = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCAmelCase__ , return_tensors="""pt""" , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.model.generate(**lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase__ )
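# Hedged usage sketch (added): in the de-obfuscated transformers source the class above
# is TranslationTool; plain-English names are mapped through LANGUAGE_CODES before the
# NLLB tokenizer builds the translation inputs.
# tool = TranslationTool()
# print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))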
| 342 | 1 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class snake_case__ :
def __init__( self , lowerCAmelCase__ = None ) -> None:
if components is None:
__magic_name__ : Any = []
__magic_name__ : List[str] = list(lowerCAmelCase__ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"
def __add__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : Dict = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : int = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> float:
...
def __mul__( self , lowerCAmelCase__ ) -> float | Vector:
if isinstance(lowerCAmelCase__ , (float, int) ):
__magic_name__ : Optional[Any] = [c * other for c in self.__components]
return Vector(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
__magic_name__ : Optional[Any] = len(self )
__magic_name__ : List[Any] = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return sum(lowerCAmelCase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __magic_name__ ( self ) -> Vector:
return Vector(self.__components )
def __magic_name__ ( self , lowerCAmelCase__ ) -> float:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__magic_name__ : Optional[int] = value
def __magic_name__ ( self ) -> float:
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__magic_name__ : Dict = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase__ ) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> float:
__magic_name__ : Optional[Any] = self * other
__magic_name__ : List[str] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCamelCase ( _A ):
"""simple docstring"""
assert isinstance(_A, _A )
return Vector([0] * dimension )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
assert isinstance(_A, _A ) and (isinstance(_A, _A ))
__magic_name__ : Union[str, Any] = [0] * dimension
__magic_name__ : Optional[int] = 1
return Vector(_A )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
assert (
isinstance(_A, _A )
and isinstance(_A, _A )
and (isinstance(_A, (int, float) ))
)
return x * scalar + y
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
random.seed(_A )
__magic_name__ : Union[str, Any] = [random.randint(_A, _A ) for _ in range(_A )]
return Vector(_A )
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : Dict = matrix
__magic_name__ : Tuple = w
__magic_name__ : Union[str, Any] = h
def __str__( self ) -> str:
__magic_name__ : Dict = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__magic_name__ : Tuple = []
for i in range(self.__height ):
__magic_name__ : Tuple = [
self.__matrix[i][j] + other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__magic_name__ : Optional[Any] = []
for i in range(self.__height ):
__magic_name__ : int = [
self.__matrix[i][j] - other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Matrix:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
def __mul__( self , lowerCAmelCase__ ) -> Vector | Matrix:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # matrix-vector
if len(lowerCAmelCase__ ) == self.__width:
__magic_name__ : Tuple = zero_vector(self.__height )
for i in range(self.__height ):
__magic_name__ : Optional[int] = [
self.__matrix[i][j] * other.component(lowerCAmelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase__ , sum(lowerCAmelCase__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCAmelCase__ , (int, float) ): # matrix-scalar
__magic_name__ : Any = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
return None
def __magic_name__ ( self ) -> int:
return self.__height
def __magic_name__ ( self ) -> int:
return self.__width
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__magic_name__ : List[Any] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
__magic_name__ : Optional[int] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase__ ) ):
__magic_name__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise Exception("""Indices out of bounds""" )
def __magic_name__ ( self ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__magic_name__ : str = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase__ ) for y in range(self.__width )
]
return sum(lowerCAmelCase__ )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : list[list[float]] = [[0] * n for _ in range(_A )]
return Matrix(_A, _A, _A )
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
random.seed(_A )
__magic_name__ : list[list[float]] = [
[random.randint(_A, _A ) for _ in range(_A )] for _ in range(_A )
]
return Matrix(_A, _A, _A )
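# Hedged usage sketch (added): in the de-obfuscated source (TheAlgorithms' linear
# algebra module) the two classes above are Vector and Matrix. Assuming those names:
# v = Vector([1.0, 2.0, 3.0])
# w = Vector([4.0, 5.0, 6.0])
# print(v + w)             # (5.0,7.0,9.0)
# print(v * w)             # dot product: 1*4 + 2*5 + 3*6 = 32.0
# m = Matrix([[1.0, 2.0], [3.0, 4.0]], 2, 2)
# print(m.determinant())   # 1.0*4.0 - 2.0*3.0 = -2.0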
| 342 |
import math
class snake_case__ :
def __init__( self , lowerCAmelCase__=0 ) -> Optional[int]: # a graph with Node 0,1,...,N-1
__magic_name__ : Tuple = n
__magic_name__ : Union[str, Any] = [
[math.inf for j in range(0 , lowerCAmelCase__ )] for i in range(0 , lowerCAmelCase__ )
] # adjacency matrix for weight
__magic_name__ : List[Any] = [
[math.inf for j in range(0 , lowerCAmelCase__ )] for i in range(0 , lowerCAmelCase__ )
] # dp[i][j] stores minimum distance from i to j
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : Dict = w
def __magic_name__ ( self ) -> Optional[int]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__magic_name__ : Optional[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
return self.dp[u][v]
if __name__ == "__main__":
__magic_name__: Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
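# Note (added): floyd_warshall() runs the classic O(n^3) triple loop; afterwards
# dp[u][v] holds the minimum path weight found from u to v (math.inf if unreachable),
# which show_min() returns and the print calls above display.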
| 342 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCamelCase ( _A ):
"""simple docstring"""
for param in module.parameters():
__magic_name__ : List[Any] = False
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : str = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__magic_name__ : Optional[int] = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[str] = plt.imshow(_A )
fig.axes.get_xaxis().set_visible(_A )
fig.axes.get_yaxis().set_visible(_A )
plt.show()
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = datetime.now()
__magic_name__ : Optional[int] = current_time.strftime("""%H:%M:%S""" )
return timestamp
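# Hedged usage sketch (added): in the de-obfuscated source these helpers are roughly
# freeze_module / get_device / show_image / get_timestamp (names assumed here).
# device = get_device()     # "cuda" when available, else "mps" (with the warning) or "cpu"
# print(get_timestamp())    # e.g. "14:32:07"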
| 342 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__magic_name__: str = logging.get_logger(__name__)
__magic_name__: int = "▁"
__magic_name__: List[str] = {"vocab_file": "sentencepiece.bpe.model"}
__magic_name__: List[str] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
__magic_name__: Tuple = {
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = ['''input_ids''', '''attention_mask''']
lowercase__ : List[int] = []
lowercase__ : List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
        # Mask token behaves like a normal word, i.e. it includes the space before it
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
__magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__ : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__magic_name__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic the fairseq token-to-id alignment for the first 4 tokens
__magic_name__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__magic_name__ : List[Any] = 1
__magic_name__ : Dict = len(self.sp_model )
__magic_name__ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
__magic_name__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__magic_name__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__magic_name__ : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__magic_name__ : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Any = self.lang_code_to_id[self._src_lang]
__magic_name__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Any:
__magic_name__ : List[Any] = self.__dict__.copy()
__magic_name__ : int = None
__magic_name__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : Any = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __magic_name__ ( self ) -> str:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
__magic_name__ : Optional[int] = [1] * len(self.prefix_tokens )
__magic_name__ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__ : Dict = src_lang
__magic_name__ : List[Any] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Tuple = tgt_lang_id
return inputs
def __magic_name__ ( self ) -> int:
__magic_name__ : str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__magic_name__ : List[str] = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
__magic_name__ : List[str] = src_lang
__magic_name__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : str = [self.cur_lang_code]
__magic_name__ : List[Any] = [self.eos_token_id]
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] = [self.cur_lang_code]
__magic_name__ : Union[str, Any] = [self.eos_token_id]
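# Hedged usage sketch (added) against the public transformers API; the class above is
# NllbTokenizer in the de-obfuscated source.
# from transformers import NllbTokenizer
# tok = NllbTokenizer.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# batch = tok("Hello world", return_tensors="pt")  # source tokens wrapped per legacy_behaviour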
| 342 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__: Tuple = logging.get_logger(__name__)
__magic_name__: Tuple = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
__magic_name__: List[Any] = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
__magic_name__: str = {
"abeja/gpt-neox-japanese-2.7b": 2_048,
}
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
with open(_A, """r""", encoding="""utf-8""" ) as f:
__magic_name__ : List[str] = json.loads(f.read() )
__magic_name__ : Optional[Any] = collections.OrderedDict()
__magic_name__ : List[str] = collections.OrderedDict()
__magic_name__ : int = collections.OrderedDict()
with open(_A, """r""", encoding="""utf-8""" ) as f:
__magic_name__ : Tuple = f.readlines()
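        # Each vocab line may hold several comma-separated surface forms that share a single token id.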
__magic_name__ : List[Any] = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(_A ):
__magic_name__ : Union[str, Any] = b
__magic_name__ : str = idx
for wd in b:
__magic_name__ : List[str] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Tuple = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|startoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Any:
super().__init__(
unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , do_clean_text=lowerCAmelCase__ , **lowerCAmelCase__ , )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(
                F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                """ pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
__magic_name__ : Tuple = do_clean_text
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = load_vocab_and_emoji(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[str] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __magic_name__ ( self ) -> List[Any]:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __magic_name__ ( self ) -> int:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
return self.subword_tokenizer.tokenize(lowerCAmelCase__ , clean=self.do_clean_text )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
return self.vocab.get(lowerCAmelCase__ , self.vocab.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[Any]:
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = """""".join(lowerCAmelCase__ ).strip()
return out_string
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[int]:
__magic_name__ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] )
if len(lowerCAmelCase__ ) > self.model_max_length:
__magic_name__ : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
__magic_name__ : List[str] = 0
if os.path.isdir(lowerCAmelCase__ ):
__magic_name__ : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__magic_name__ : Tuple = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__magic_name__ : int = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
__magic_name__ : int = token_index
writer.write(""",""".join(lowerCAmelCase__ ) + """\n""" )
index += 1
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , lowerCAmelCase__ )
return vocab_file, emoji_file
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : Tuple = vocab # same as swe
__magic_name__ : Union[str, Any] = ids_to_tokens # same as bpe
__magic_name__ : Optional[Any] = emoji
__magic_name__ : Optional[Any] = np.max([len(lowerCAmelCase__ ) for w in self.vocab.keys()] )
__magic_name__ : str = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
__magic_name__ : int = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
__magic_name__ : Dict = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
__magic_name__ : List[Any] = re.compile(
R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__magic_name__ : List[Any] = re.compile(
R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__magic_name__ : Optional[Any] = re.compile(
R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
__magic_name__ : Union[str, Any] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
__magic_name__ : int = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
__magic_name__ : Optional[Any] = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self ) -> Optional[Any]:
return len(self.ids_to_tokens )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : List[Any] = self.content_repattera.sub("""<URL>""" , lowerCAmelCase__ )
__magic_name__ : Dict = self.content_repattera.sub("""<EMAIL>""" , lowerCAmelCase__ )
__magic_name__ : str = self.content_repattera.sub("""<TEL>""" , lowerCAmelCase__ )
__magic_name__ : Optional[int] = self.content_repattera.sub("""<DATE>""" , lowerCAmelCase__ )
__magic_name__ : int = self.content_repattera.sub("""<DATE>""" , lowerCAmelCase__ )
__magic_name__ : Any = self.content_repattera.sub("""<PRICE>""" , lowerCAmelCase__ )
__magic_name__ : List[str] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__magic_name__ : Dict = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Union[str, Any]:
__magic_name__ : Dict = text.replace(""" """ , """<SP>""" )
__magic_name__ : List[Any] = text.replace(""" """ , """<SP>""" )
__magic_name__ : str = text.replace("""\r\n""" , """<BR>""" )
__magic_name__ : Dict = text.replace("""\n""" , """<BR>""" )
__magic_name__ : List[str] = text.replace("""\r""" , """<BR>""" )
__magic_name__ : Tuple = text.replace("""\t""" , """<TAB>""" )
__magic_name__ : Any = text.replace("""—""" , """ー""" )
__magic_name__ : int = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__magic_name__ : int = text.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if clean:
__magic_name__ : Optional[Any] = self.clean_text(lowerCAmelCase__ )
def check_simbol(lowerCAmelCase__ ):
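            # Flag single characters whose 2-byte UTF-8 encoding falls in symbol-like ranges (mapped to <KIGOU>).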
__magic_name__ : int = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 2:
__magic_name__ : Any = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2_a1 and c <= 0Xc2_bf)
or (c >= 0Xc7_80 and c <= 0Xc7_83)
or (c >= 0Xca_b9 and c <= 0Xcb_bf)
or (c >= 0Xcc_80 and c <= 0Xcd_a2)
):
return True
return False
def checkuae(lowerCAmelCase__ ):
__magic_name__ : Optional[int] = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 3:
__magic_name__ : Tuple = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f:
return True
return False
__magic_name__ : Union[str, Any] = 0
__magic_name__ : Any = []
while pos < len(lowerCAmelCase__ ):
__magic_name__ : int = min(len(lowerCAmelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__magic_name__ : List[str] = [] # (token_id, token, pos)
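            # Greedy longest match: try candidate substrings from longest to shortest.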
for e in range(lowerCAmelCase__ , lowerCAmelCase__ , -1 ):
__magic_name__ : Union[str, Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase__ ) > 2:
__magic_name__ : List[str] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCAmelCase__ ) > 0:
# the smallest token_id is adopted
__magic_name__ ,__magic_name__ ,__magic_name__ : int = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[0] )[0]
result.append(lowerCAmelCase__ )
__magic_name__ : List[Any] = e
else:
__magic_name__ : str = pos + 1
__magic_name__ : Optional[int] = text[pos:end]
if check_simbol(lowerCAmelCase__ ):
result.append("""<KIGOU>""" )
elif checkuae(lowerCAmelCase__ ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__magic_name__ : str = end
return result
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__="\n" ) -> str:
__magic_name__ : List[Any] = []
__magic_name__ : List[str] = []
__magic_name__ : Any = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("""utf-8""" , errors="""replace""" ) )
__magic_name__ : List[str] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(lowerCAmelCase__ )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("""utf-8""" , errors="""replace""" ) )
__magic_name__ : List[str] = """""".join(lowerCAmelCase__ )
return text
| 342 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = MobileBertConfig.from_json_file(_A )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ : Tuple = MobileBertForPreTraining(_A )
# Load weights from tf checkpoint
__magic_name__ : int = load_tf_weights_in_mobilebert(_A, _A, _A )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict(), _A )
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 342 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__magic_name__: Any = logging.get_logger(__name__)
__magic_name__: Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__magic_name__: Optional[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Any = {}
with open(_A, """r""" ) as file:
for line_number, line in enumerate(_A ):
__magic_name__ : Optional[int] = line.strip()
if line:
__magic_name__ : Optional[Any] = line.split()
__magic_name__ : str = line_number
__magic_name__ : Optional[Any] = words[0]
__magic_name__ : int = value
return result
def UpperCamelCase ( _A, _A, _A, _A, _A ):
"""simple docstring"""
for attribute in key.split(""".""" ):
__magic_name__ : int = getattr(_A, _A )
__magic_name__ : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
__magic_name__ : Tuple = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__magic_name__ : List[Any] = """param"""
if weight_type is not None and weight_type != "param":
__magic_name__ : Tuple = getattr(_A, _A ).shape
elif weight_type is not None and weight_type == "param":
__magic_name__ : List[Any] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__magic_name__ : List[str] = getattr(_A, _A )
__magic_name__ : List[Any] = shape_pointer.shape
# let's reduce dimension
__magic_name__ : int = value[0]
else:
__magic_name__ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__magic_name__ : int = value
elif weight_type == "weight_g":
__magic_name__ : Dict = value
elif weight_type == "weight_v":
__magic_name__ : Tuple = value
elif weight_type == "bias":
__magic_name__ : Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__magic_name__ : str = getattr(_A, _A )
__magic_name__ : Any = value
else:
__magic_name__ : str = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCamelCase ( _A, _A, _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
__magic_name__ : Dict = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__magic_name__ : str = """param"""
if weight_type is not None and weight_type != "param":
__magic_name__ : Dict = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__magic_name__ : Dict = """.""".join([key, hf_param_name] )
else:
__magic_name__ : Optional[Any] = key
__magic_name__ : str = value if """lm_head""" in full_key else value[0]
__magic_name__: Tuple = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def UpperCamelCase ( _A, _A, _A=None, _A=None ):
"""simple docstring"""
__magic_name__ : str = False
for key, mapped_key in MAPPING.items():
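        # Keys that are not top-level live under the "wav2vec2." submodule in the HF model.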
__magic_name__ : Optional[int] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__magic_name__ : Optional[int] = True
if "*" in mapped_key:
__magic_name__ : Any = name.split(_A )[0].split(""".""" )[-2]
__magic_name__ : Optional[int] = mapped_key.replace("""*""", _A )
if "weight_g" in name:
__magic_name__ : Any = """weight_g"""
elif "weight_v" in name:
__magic_name__ : Optional[int] = """weight_v"""
elif "bias" in name:
__magic_name__ : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__magic_name__ : int = """weight"""
else:
__magic_name__ : Union[str, Any] = None
if hf_dict is not None:
rename_dict(_A, _A, _A, _A, _A )
else:
set_recursively(_A, _A, _A, _A, _A )
return is_used
return is_used
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = []
__magic_name__ : List[str] = fairseq_model.state_dict()
__magic_name__ : Dict = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__magic_name__ : Optional[int] = False
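        # Convolutional feature-extractor weights get dedicated per-layer handling below.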
if "conv_layers" in name:
load_conv_layer(
_A, _A, _A, _A, hf_model.config.feat_extract_norm == """group""", )
__magic_name__ : List[Any] = True
else:
__magic_name__ : Union[str, Any] = load_wavaveca_layer(_A, _A, _A )
if not is_used:
unused_weights.append(_A )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCamelCase ( _A, _A, _A, _A, _A ):
"""simple docstring"""
__magic_name__ : List[Any] = full_name.split("""conv_layers.""" )[-1]
__magic_name__ : Optional[Any] = name.split(""".""" )
__magic_name__ : Tuple = int(items[0] )
__magic_name__ : Optional[int] = int(items[1] )
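    # type_id 0 covers the conv weight/bias; type_id 2 covers the layer norm (or the group norm of layer 0).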
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__magic_name__ : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__magic_name__ : str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__magic_name__ : str = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__magic_name__ : List[str] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_A )
@torch.no_grad()
def UpperCamelCase ( _A, _A, _A=None, _A=None, _A=True, _A=False ):
"""simple docstring"""
if config_path is not None:
__magic_name__ : Optional[Any] = WavaVecaConfig.from_pretrained(_A )
else:
__magic_name__ : Tuple = WavaVecaConfig()
if is_seq_class:
__magic_name__ : int = read_txt_into_dict(_A )
__magic_name__ : List[Any] = idalabel
__magic_name__ : str = WavaVecaForSequenceClassification(_A )
__magic_name__ : Tuple = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=_A, return_attention_mask=_A, )
feature_extractor.save_pretrained(_A )
elif is_finetuned:
if dict_path:
__magic_name__ : Tuple = Dictionary.load(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__magic_name__ : int = target_dict.pad_index
__magic_name__ : Dict = target_dict.bos_index
__magic_name__ : Tuple = target_dict.eos_index
__magic_name__ : Any = len(target_dict.symbols )
__magic_name__ : List[Any] = os.path.join(_A, """vocab.json""" )
if not os.path.isdir(_A ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_A ) )
return
os.makedirs(_A, exist_ok=_A )
__magic_name__ : int = target_dict.indices
# fairseq has the <pad> and <s> switched
__magic_name__ : Optional[int] = 0
__magic_name__ : Union[str, Any] = 1
with open(_A, """w""", encoding="""utf-8""" ) as vocab_handle:
json.dump(_A, _A )
__magic_name__ : int = WavaVecaCTCTokenizer(
_A, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=_A, )
__magic_name__ : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
__magic_name__ : Dict = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=_A, return_attention_mask=_A, )
__magic_name__ : List[str] = WavaVecaProcessor(feature_extractor=_A, tokenizer=_A )
processor.save_pretrained(_A )
__magic_name__ : Dict = WavaVecaForCTC(_A )
else:
__magic_name__ : Dict = WavaVecaForPreTraining(_A )
if is_finetuned or is_seq_class:
__magic_name__ ,__magic_name__ ,__magic_name__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__magic_name__ : Union[str, Any] = argparse.Namespace(task="""audio_pretraining""" )
__magic_name__ : int = fairseq.tasks.setup_task(_A )
__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=_A )
__magic_name__ : List[Any] = model[0].eval()
recursively_load_weights(_A, _A, not is_finetuned )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
__magic_name__: List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__magic_name__: List[str] = parser.parse_args()
__magic_name__: Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 342 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Optional[Any] = MgpstrTokenizer
lowercase__ : int = False
lowercase__ : Any = {}
lowercase__ : Optional[int] = False
def __magic_name__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
__magic_name__ : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__magic_name__ : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : List[str] = """tester"""
__magic_name__ : int = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Dict = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : List[str] = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
__magic_name__ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ ,__magic_name__ : Optional[Any] = self.get_input_output_texts(lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
__magic_name__ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
__magic_name__ : Optional[int] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase__ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __magic_name__ ( self ) -> Tuple:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def __magic_name__ ( self ) -> Optional[Any]:
pass
| 342 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Any = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Tuple = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Union[str, Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Optional[Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Dict = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : List[Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Dict = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : str = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : int = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Union[str, Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : int = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Union[str, Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : str = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Optional[int] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Optional[Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Optional[Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : List[str] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : int = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Tuple = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : str = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : List[Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Union[str, Any] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : int = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : List[str] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Tuple = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : int = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Optional[int] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Any = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : List[str] = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : Tuple = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class snake_case__ ( metaclass=_lowerCAmelCase ):
lowercase__ : int = ['''sentencepiece''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
| 342 |
import re
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(_A, _A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 342 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__: str = logging.get_logger(__name__)
__magic_name__: Dict = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[int] = '''ctrl'''
lowercase__ : Tuple = ['''past_key_values''']
lowercase__ : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , lowerCAmelCase__=24_65_34 , lowerCAmelCase__=2_56 , lowerCAmelCase__=12_80 , lowerCAmelCase__=81_92 , lowerCAmelCase__=48 , lowerCAmelCase__=16 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1e-6 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> int:
__magic_name__ : Union[str, Any] = vocab_size
__magic_name__ : Optional[int] = n_positions
__magic_name__ : int = n_embd
__magic_name__ : Optional[int] = n_layer
__magic_name__ : List[Any] = n_head
__magic_name__ : str = dff
__magic_name__ : Tuple = resid_pdrop
__magic_name__ : Any = embd_pdrop
__magic_name__ : Tuple = layer_norm_epsilon
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : List[Any] = use_cache
super().__init__(**lowerCAmelCase__ )
| 342 |
import doctest
from collections import deque
import numpy as np
class snake_case__ :
def __init__( self ) -> None:
__magic_name__ : Any = [2, 1, 2, -1]
__magic_name__ : Tuple = [1, 2, 3, 4]
def __magic_name__ ( self ) -> list[float]:
__magic_name__ : Optional[Any] = len(self.first_signal )
__magic_name__ : Dict = len(self.second_signal )
__magic_name__ : Tuple = max(lowerCAmelCase__ , lowerCAmelCase__ )
# create a zero matrix of max_length x max_length
__magic_name__ : Optional[int] = [[0] * max_length for i in range(lowerCAmelCase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCAmelCase__ ):
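            # Row i of the circulant matrix is the second signal rotated by i positions.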
__magic_name__ : List[str] = deque(self.second_signal )
rotated_signal.rotate(lowerCAmelCase__ )
for j, item in enumerate(lowerCAmelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__magic_name__ : List[Any] = np.matmul(np.transpose(lowerCAmelCase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCAmelCase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 342 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]:
__magic_name__ : int = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = image_size
__magic_name__ : str = num_channels
__magic_name__ : Dict = patch_size
__magic_name__ : Tuple = num_frames
__magic_name__ : List[Any] = is_training
__magic_name__ : List[Any] = use_labels
__magic_name__ : Dict = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Tuple = attention_type
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[Any] = scope
__magic_name__ : Tuple = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__magic_name__ : str = (image_size // patch_size) ** 2
__magic_name__ : Any = (num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> str:
__magic_name__ : Dict = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__magic_name__ : Optional[Any] = self.num_labels
return config
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
__magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
# verify the logits shape
__magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs
__magic_name__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Tuple = False
lowercase__ : Any = False
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : List[Any] = TimesformerModelTester(self )
__magic_name__ : List[str] = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]:
__magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Optional[int] = [*signature.parameters.keys()]
__magic_name__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Optional[int]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
if not self.has_attentions:
pass
else:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
for model_class in self.all_model_classes:
__magic_name__ : Tuple = self.model_tester.seq_length
__magic_name__ : int = self.model_tester.num_frames
__magic_name__ : Any = True
__magic_name__ : Tuple = False
__magic_name__ : Optional[int] = True
__magic_name__ : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : List[str] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[Any] = True
__magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : int = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__magic_name__ : Union[str, Any] = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : Optional[Any] = True
__magic_name__ : int = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
__magic_name__ : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self ) -> Any:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" )
__magic_name__ : List[str] = np.load(_A )
return list(_A )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowerCAmelCase__ )
__magic_name__ : str = self.default_image_processor
__magic_name__ : Any = prepare_video()
__magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 342 |
from math import factorial
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(_A, _A ) or not isinstance(_A, _A ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
__magic_name__ : int = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__magic_name__ : Any = float(factorial(_A ) )
coefficient /= factorial(_A ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
| 342 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Any = '''new-model'''
if is_tf_available():
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : List[str] = NewModelConfig
@require_tf
class snake_case__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = """bert-base-cased"""
__magic_name__ : str = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : str = TFAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> int:
__magic_name__ : List[Any] = """bert-base-cased"""
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Any:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ )
__magic_name__ ,__magic_name__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> int:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : str = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ )
__magic_name__ ,__magic_name__ : Any = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Optional[Any]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
__magic_name__ ,__magic_name__ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__magic_name__ : int = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[str] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__magic_name__ : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
@require_tensorflow_probability
def __magic_name__ ( self ) -> List[str]:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__magic_name__ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase__ )
__magic_name__ ,__magic_name__ : Any = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : str = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 1_44_10 )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : List[str] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 1_44_10 )
def __magic_name__ ( self ) -> List[Any]:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__magic_name__ : Dict = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : str = copy.deepcopy(model.config )
__magic_name__ : str = ["""FunnelBaseModel"""]
__magic_name__ : Dict = TFAutoModel.from_config(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase__ )
__magic_name__ : Optional[int] = TFAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
try:
AutoConfig.register("""new-model""" , lowerCAmelCase__ )
__magic_name__ : Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCAmelCase__ ):
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__ ):
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__magic_name__ : str = BertModelTester(self ).get_config()
__magic_name__ : Tuple = NewModelConfig(**tiny_config.to_dict() )
__magic_name__ : Tuple = auto_class.from_config(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase__ )
__magic_name__ : List[str] = auto_class.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __magic_name__ ( self ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCAmelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
__magic_name__ : List[str] = TFAutoModel.from_pretrained("""bert-base""" )
def __magic_name__ ( self ) -> Tuple:
with self.assertRaisesRegex(
lowerCAmelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__magic_name__ : Optional[int] = TFAutoModel.from_pretrained(lowerCAmelCase__ , revision="""aaaaaa""" )
def __magic_name__ ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
lowerCAmelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
__magic_name__ : Tuple = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def __magic_name__ ( self ) -> Tuple:
with self.assertRaisesRegex(lowerCAmelCase__ , """Use `from_pt=True` to load this model""" ):
__magic_name__ : str = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def __magic_name__ ( self ) -> List[Any]:
# Make sure we have cached the model.
__magic_name__ : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__magic_name__ : Tuple = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__magic_name__ : List[str] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
__magic_name__ : Tuple = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
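# A minimal, de-obfuscated sketch of the pattern the tests above exercise,
# assuming only the public transformers TF auto-API; the checkpoint name is a
# stand-in, not necessarily the one used by the original suite.
from transformers import AutoConfig, TFAutoModelForSequenceClassification

def load_tf_model(model_name: str = "bert-base-uncased"):
    config = AutoConfig.from_pretrained(model_name)  # resolve the config first
    model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
    assert model.config.model_type == config.model_type  # same architecture family
    return config, model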
| 342 |
from __future__ import annotations
def UpperCamelCase ( _A ): # This function is recursive
"""simple docstring"""
__magic_name__ : str = len(_A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__magic_name__ : Dict = array[0]
__magic_name__ : Optional[Any] = False
__magic_name__ : Tuple = 1
__magic_name__ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
__magic_name__ : Union[str, Any] = True
__magic_name__ : List[Any] = [element for element in array[i:] if element >= array[i]]
__magic_name__ : Dict = longest_subsequence(_A )
if len(_A ) > len(_A ):
__magic_name__ : Tuple = temp_array
else:
i += 1
__magic_name__ : Any = [element for element in array[1:] if element >= pivot]
__magic_name__ : Dict = [pivot, *longest_subsequence(_A )]
if len(_A ) > len(_A ):
return temp_array
else:
return longest_subseq
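# For comparison, a standard O(n^2) dynamic-programming sketch of the same
# task (longest non-decreasing subsequence), written independently of the
# recursive version above.
def longest_subsequence_dp(array: list[int]) -> list[int]:
    if not array:
        return []
    # best[i] holds the longest non-decreasing subsequence ending at index i.
    best: list[list[int]] = [[value] for value in array]
    for i in range(1, len(array)):
        for j in range(i):
            if array[j] <= array[i] and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [array[i]]
    return max(best, key=len)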
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__=0.0_1 , lowerCAmelCase__=10_00 ) -> Tuple:
__magic_name__ : List[str] = p_stop
__magic_name__ : str = max_length
def __iter__( self ) -> List[Any]:
__magic_name__ : Optional[Any] = 0
__magic_name__ : Any = False
while not stop and count < self.max_length:
yield count
count += 1
__magic_name__ : List[str] = random.random() < self.p_stop
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=True ) -> Tuple:
__magic_name__ : Optional[int] = [
            BatchSamplerShard(lowerCAmelCase__ , 2 , i , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )  # each shard needs its own process_index
for i in range(2 )
]
__magic_name__ : List[Any] = [list(lowerCAmelCase__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCAmelCase__ ) for shard in batch_sampler_shards] , [len(lowerCAmelCase__ ) for e in expected] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
# Check the shards when the dataset is a round multiple of total batch size.
__magic_name__ : Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__magic_name__ : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
__magic_name__ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
__magic_name__ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is very small.
__magic_name__ : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__magic_name__ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
__magic_name__ : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
__magic_name__ : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__magic_name__ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
__magic_name__ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
__magic_name__ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Any = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
__magic_name__ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__magic_name__ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__magic_name__ : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
__magic_name__ : Optional[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
__magic_name__ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
__magic_name__ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__magic_name__ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : str = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
__magic_name__ : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : str = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__magic_name__ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
__magic_name__ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : int = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
__magic_name__ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : List[Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        __magic_name__ : Dict = [BatchSamplerShard(lowerCAmelCase__ , 2 , i , even_batches=lowerCAmelCase__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=2 , lowerCAmelCase__=False ) -> Any:
random.seed(lowerCAmelCase__ )
__magic_name__ : Optional[int] = list(lowerCAmelCase__ )
__magic_name__ : List[Any] = [
IterableDatasetShard(
                lowerCAmelCase__ , batch_size=lowerCAmelCase__ , drop_last=lowerCAmelCase__ , num_processes=lowerCAmelCase__ , process_index=i , split_batches=lowerCAmelCase__ , )
for i in range(lowerCAmelCase__ )
]
__magic_name__ : List[str] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase__ )
iterable_dataset_lists.append(list(lowerCAmelCase__ ) )
__magic_name__ : Any = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size.
__magic_name__ : int = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
self.assertTrue(len(lowerCAmelCase__ ) % shard_batch_size == 0 )
__magic_name__ : str = []
for idx in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ):
reference += reference
self.assertListEqual(lowerCAmelCase__ , reference[: len(lowerCAmelCase__ )] )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Tuple = 42
__magic_name__ : Optional[Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Edge case with a very small dataset
__magic_name__ : Dict = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = SkipBatchSampler(lowerCAmelCase__ , 2 )
self.assertListEqual(list(lowerCAmelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __magic_name__ ( self ) -> str:
__magic_name__ : List[Any] = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
__magic_name__ : Optional[int] = skip_first_batches(lowerCAmelCase__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : List[Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __magic_name__ ( self ) -> Dict:
Accelerator()
__magic_name__ : List[str] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
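# A small standalone sketch of the sharding behaviour the tests above verify,
# using accelerate's public BatchSamplerShard API outside the unittest
# harness; the expected lists mirror the assertions earlier in this file.
def _batch_sampler_shard_demo():
    base = BatchSampler(range(24), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(base, num_processes=2, process_index=i) for i in range(2)]
    # Process 0 receives batches 0, 2, 4, 6 of the base sampler; process 1 the rest.
    assert list(shards[0]) == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]
    assert list(shards[1]) == [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]]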
| 342 |
import argparse
import os
import re
__magic_name__: Optional[Any] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__magic_name__: Any = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
__magic_name__: Tuple = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
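# A quick sanity sketch of what the two patterns match, assuming they are
# bound to the names `_re_intro_mapping` and `_re_identifier` that the
# function below uses:
#     assert _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(") is not None
#     assert _re_identifier.search('    ("albert", "AlbertModel"),').groups()[0] == "albert"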
def UpperCamelCase ( _A, _A = False ):
"""simple docstring"""
with open(_A, """r""", encoding="""utf-8""" ) as f:
__magic_name__ : Any = f.read()
__magic_name__ : List[Any] = content.split("""\n""" )
__magic_name__ : List[str] = []
__magic_name__ : Union[str, Any] = 0
while line_idx < len(_A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__magic_name__ : Any = len(re.search(R"""^(\s*)\S""", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
__magic_name__ : List[Any] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__magic_name__ : List[str] = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__magic_name__ : Union[str, Any] = sorted(_A, key=lambda _A : _re_identifier.search(_A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write("""\n""".join(_A ) )
elif "\n".join(_A ) != content:
return True
def UpperCamelCase ( _A = False ):
"""simple docstring"""
__magic_name__ : Any = [os.path.join(_A, _A ) for f in os.listdir(_A ) if f.endswith(""".py""" )]
__magic_name__ : List[str] = [sort_auto_mapping(_A, overwrite=_A ) for fname in fnames]
if not overwrite and any(_A ):
__magic_name__ : Optional[Any] = [f for f, d in zip(_A, _A ) if d]
raise ValueError(
f'The following files have auto mappings that need sorting: {", ".join(_A )}. Run `make style` to fix'
""" this.""" )
if __name__ == "__main__":
__magic_name__: List[str] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__magic_name__: List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 342 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self , lowerCAmelCase__ = 1_28 , lowerCAmelCase__ = 2_56 , lowerCAmelCase__ = 2_0_0_0.0 , lowerCAmelCase__ = 7_68 , lowerCAmelCase__ = 12 , lowerCAmelCase__ = 12 , lowerCAmelCase__ = 64 , lowerCAmelCase__ = 20_48 , lowerCAmelCase__ = 0.1 , ) -> str:
super().__init__()
__magic_name__ : Optional[Any] = nn.Sequential(
nn.Linear(lowerCAmelCase__ , d_model * 4 , bias=lowerCAmelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase__ ) , nn.SiLU() , )
__magic_name__ : Tuple = nn.Embedding(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : int = False
__magic_name__ : Dict = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = nn.Dropout(p=lowerCAmelCase__ )
__magic_name__ : Tuple = nn.ModuleList()
for lyr_num in range(lowerCAmelCase__ ):
# FiLM conditional T5 decoder
__magic_name__ : Tuple = DecoderLayer(d_model=lowerCAmelCase__ , d_kv=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , d_ff=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ )
self.decoders.append(lowerCAmelCase__ )
__magic_name__ : Any = TaLayerNorm(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = nn.Dropout(p=lowerCAmelCase__ )
__magic_name__ : Dict = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ ,__magic_name__ ,__magic_name__ : List[str] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__magic_name__ : Dict = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__magic_name__ : Optional[Any] = self.conditioning_emb(lowerCAmelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__magic_name__ : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__magic_name__ : Tuple = torch.broadcast_to(
torch.arange(lowerCAmelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__magic_name__ : Optional[Any] = self.position_encoding(lowerCAmelCase__ )
__magic_name__ : List[Any] = self.continuous_inputs_projection(lowerCAmelCase__ )
inputs += position_encodings
__magic_name__ : Optional[int] = self.dropout(lowerCAmelCase__ )
# decoder: No padding present.
__magic_name__ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__magic_name__ : Optional[Any] = [(x, self.encoder_decoder_mask(lowerCAmelCase__ , lowerCAmelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__magic_name__ : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__magic_name__ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__magic_name__ : Dict = lyr(
lowerCAmelCase__ , conditioning_emb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )[0]
__magic_name__ : Union[str, Any] = self.decoder_norm(lowerCAmelCase__ )
__magic_name__ : Any = self.post_dropout(lowerCAmelCase__ )
__magic_name__ : List[str] = self.spec_out(lowerCAmelCase__ )
return spec_out
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1e-6 ) -> Any:
super().__init__()
__magic_name__ : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase__ , d_kv=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase__ , d_kv=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ , layer_norm_epsilon=lowerCAmelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase__ , d_ff=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ , layer_norm_epsilon=lowerCAmelCase__ ) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> int:
__magic_name__ : Union[str, Any] = self.layer[0](
lowerCAmelCase__ , conditioning_emb=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , )
if encoder_hidden_states is not None:
__magic_name__ : int = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
__magic_name__ : Dict = self.layer[1](
lowerCAmelCase__ , key_value_states=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , )
# Apply Film Conditional Feed Forward layer
__magic_name__ : Any = self.layer[-1](lowerCAmelCase__ , lowerCAmelCase__ )
return (hidden_states,)
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
super().__init__()
__magic_name__ : List[str] = TaLayerNorm(lowerCAmelCase__ )
__magic_name__ : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase__ )
__magic_name__ : List[Any] = Attention(query_dim=lowerCAmelCase__ , heads=lowerCAmelCase__ , dim_head=lowerCAmelCase__ , out_bias=lowerCAmelCase__ , scale_qk=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = nn.Dropout(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any:
# pre_self_attention_layer_norm
__magic_name__ : Optional[Any] = self.layer_norm(lowerCAmelCase__ )
if conditioning_emb is not None:
__magic_name__ : Optional[Any] = self.FiLMLayer(lowerCAmelCase__ , lowerCAmelCase__ )
# Self-attention block
__magic_name__ : int = self.attention(lowerCAmelCase__ )
__magic_name__ : int = hidden_states + self.dropout(lowerCAmelCase__ )
return hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
super().__init__()
__magic_name__ : Dict = Attention(query_dim=lowerCAmelCase__ , heads=lowerCAmelCase__ , dim_head=lowerCAmelCase__ , out_bias=lowerCAmelCase__ , scale_qk=lowerCAmelCase__ )
__magic_name__ : Optional[int] = TaLayerNorm(lowerCAmelCase__ , eps=lowerCAmelCase__ )
__magic_name__ : List[str] = nn.Dropout(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Tuple:
__magic_name__ : Optional[int] = self.layer_norm(lowerCAmelCase__ )
__magic_name__ : Tuple = self.attention(
lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
__magic_name__ : Tuple = hidden_states + self.dropout(lowerCAmelCase__ )
return layer_output
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
super().__init__()
__magic_name__ : int = TaDenseGatedActDense(d_model=lowerCAmelCase__ , d_ff=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ )
__magic_name__ : str = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase__ )
__magic_name__ : List[str] = TaLayerNorm(lowerCAmelCase__ , eps=lowerCAmelCase__ )
__magic_name__ : str = nn.Dropout(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
__magic_name__ : int = self.layer_norm(lowerCAmelCase__ )
if conditioning_emb is not None:
__magic_name__ : Union[str, Any] = self.film(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Any = self.DenseReluDense(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = hidden_states + self.dropout(lowerCAmelCase__ )
return hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
super().__init__()
__magic_name__ : Union[str, Any] = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
__magic_name__ : Optional[int] = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
__magic_name__ : List[str] = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
__magic_name__ : str = nn.Dropout(lowerCAmelCase__ )
__magic_name__ : Tuple = NewGELUActivation()
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
        # The gated act-dense block needs two distinct input projections: one
        # passed through the activation (the gate) and one kept linear.
        __magic_name__ : Tuple = self.act(self.wi_0(lowerCAmelCase__ ) )
        __magic_name__ : List[Any] = self.wi_1(lowerCAmelCase__ )
__magic_name__ : str = hidden_gelu * hidden_linear
__magic_name__ : Dict = self.dropout(lowerCAmelCase__ )
__magic_name__ : List[Any] = self.wo(lowerCAmelCase__ )
return hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1e-6 ) -> List[Any]:
super().__init__()
__magic_name__ : Any = nn.Parameter(torch.ones(lowerCAmelCase__ ) )
__magic_name__ : Dict = eps
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        __magic_name__ : List[str] = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
__magic_name__ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class snake_case__ ( nn.Module ):
def __magic_name__ ( self , lowerCAmelCase__ ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(lowerCAmelCase__ , 3.0 )) ))
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
super().__init__()
__magic_name__ : Any = nn.Linear(lowerCAmelCase__ , out_features * 2 , bias=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
__magic_name__ : Any = self.scale_bias(lowerCAmelCase__ )
__magic_name__ ,__magic_name__ : str = torch.chunk(lowerCAmelCase__ , 2 , -1 )
__magic_name__ : int = x * (1 + scale) + shift
return x
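# A self-contained sketch of the FiLM (feature-wise linear modulation) idea
# implemented by the conditioning layer above: a conditioning embedding
# predicts a per-channel scale and shift that modulate the hidden states.
import torch
from torch import nn

class FiLMSketch(nn.Module):
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        # A single linear layer emits both scale and shift, chunked apart below.
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift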
| 342 |
__magic_name__: str = [0, 2, 4, 6, 8]
__magic_name__: Optional[int] = [1, 3, 5, 7, 9]
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__magic_name__ : List[Any] = 0
for digit in range(10 ):
__magic_name__ : Optional[int] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, _A, _A )
return result
__magic_name__ : str = 0
for digita in range(10 ):
__magic_name__ : Optional[Any] = digita
if (remainder + digita) % 2 == 0:
__magic_name__ : Tuple = ODD_DIGITS
else:
__magic_name__ : str = EVEN_DIGITS
for digita in other_parity_digits:
__magic_name__ : Tuple = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, _A, _A, )
return result
def UpperCamelCase ( _A = 9 ):
"""simple docstring"""
__magic_name__ : List[str] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(_A, 0, [0] * length, _A )
return result
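# A brute-force cross-check of the digit-by-digit count above, feasible for
# small powers of ten; Project Euler 145 states there are exactly 120
# reversible numbers below one thousand.
def is_reversible(num: int) -> bool:
    if num % 10 == 0:  # reverse(num) would have a leading zero
        return False
    return all(int(d) % 2 == 1 for d in str(num + int(str(num)[::-1])))

# Cross-check (assuming the counter above is bound to `solution`, as the
# __main__ block below uses):
#     assert sum(is_reversible(n) for n in range(1, 1_000)) == solution(3) == 120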
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__magic_name__: Any = get_logger(__name__)
__magic_name__: Union[str, Any] = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class snake_case__ :
@add_start_docstrings(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class snake_case__ :
@add_start_docstrings(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class snake_case__ ( _lowerCAmelCase ):
@add_start_docstrings(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> jnp.ndarray:
for processor in self:
__magic_name__ : str = inspect.signature(processor.__call__ ).parameters
if len(lowerCAmelCase__ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'Make sure that all the required parameters: {list(function_args.keys() )} for '
F'{processor.__class__} are passed to the logits processor.' )
__magic_name__ : Optional[int] = processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
else:
__magic_name__ : str = processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ ) -> List[str]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not (temperature > 0):
raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}' )
__magic_name__ : List[Any] = temperature
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
__magic_name__ : Dict = scores / self.temperature
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = -float("""Inf""" ) , lowerCAmelCase__ = 1 ) -> Dict:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or (min_tokens_to_keep < 1):
raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
__magic_name__ : Dict = top_p
__magic_name__ : int = filter_value
__magic_name__ : Tuple = min_tokens_to_keep
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
__magic_name__ ,__magic_name__ : Optional[Any] = lax.top_k(lowerCAmelCase__ , scores.shape[-1] )
__magic_name__ : str = jnp.full_like(lowerCAmelCase__ , self.filter_value )
__magic_name__ : List[Any] = jax.nn.softmax(lowerCAmelCase__ , axis=-1 ).cumsum(axis=-1 )
__magic_name__ : Union[str, Any] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__magic_name__ : Optional[int] = jnp.roll(lowerCAmelCase__ , 1 )
score_mask |= score_mask.at[:, 0].set(lowerCAmelCase__ )
# min tokens to keep
__magic_name__ : List[str] = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = jnp.where(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = jax.lax.sort_key_val(lowerCAmelCase__ , lowerCAmelCase__ )[-1]
return next_scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = -float("""Inf""" ) , lowerCAmelCase__ = 1 ) -> Dict:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or top_k <= 0:
raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' )
__magic_name__ : List[Any] = max(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : int = filter_value
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
__magic_name__ ,__magic_name__ : Optional[int] = scores.shape
__magic_name__ : Any = jnp.full(batch_size * vocab_size , self.filter_value )
__magic_name__ : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
__magic_name__ ,__magic_name__ : Union[str, Any] = lax.top_k(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : int = jnp.broadcast_to((jnp.arange(lowerCAmelCase__ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__magic_name__ : Any = topk_scores.flatten()
__magic_name__ : Dict = topk_indices.flatten() + shift
__magic_name__ : Union[str, Any] = next_scores_flat.at[topk_indices_flat].set(lowerCAmelCase__ )
__magic_name__ : Tuple = next_scores_flat.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
return next_scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : List[Any] = bos_token_id
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
__magic_name__ : List[str] = jnp.full(scores.shape , -float("""inf""" ) )
__magic_name__ : Optional[Any] = 1 - jnp.bool_(cur_len - 1 )
__magic_name__ : List[Any] = jnp.where(lowerCAmelCase__ , new_scores.at[:, self.bos_token_id].set(0 ) , lowerCAmelCase__ )
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = max_length
__magic_name__ : Optional[Any] = eos_token_id
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
__magic_name__ : Tuple = jnp.full(scores.shape , -float("""inf""" ) )
__magic_name__ : Optional[int] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__magic_name__ : Dict = jnp.where(lowerCAmelCase__ , new_scores.at[:, self.eos_token_id].set(0 ) , lowerCAmelCase__ )
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or min_length < 0:
raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or eos_token_id < 0:
raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
__magic_name__ : Union[str, Any] = min_length
__magic_name__ : int = eos_token_id
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
__magic_name__ : Dict = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__magic_name__ : List[str] = jnp.where(lowerCAmelCase__ , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , lowerCAmelCase__ )
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
__magic_name__ : Dict = list(lowerCAmelCase__ )
__magic_name__ : Tuple = begin_index
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[Any] = 1 - jnp.bool_(cur_len - self.begin_index )
__magic_name__ : Optional[Any] = jnp.where(lowerCAmelCase__ , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , lowerCAmelCase__ )
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ ) -> str:
__magic_name__ : List[str] = list(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
__magic_name__ : Optional[Any] = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ ) -> List[Any]:
__magic_name__ : List[Any] = dict(lowerCAmelCase__ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        __magic_name__ : Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
__magic_name__ : Optional[int] = force_token_array.at[index].set(lowerCAmelCase__ )
        __magic_name__ : Tuple = jnp.int32(lowerCAmelCase__ )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> jnp.ndarray:
def _force_token(lowerCAmelCase__ ):
__magic_name__ : str = scores.shape[0]
__magic_name__ : List[Any] = self.force_token_array[generation_idx]
__magic_name__ : Union[str, Any] = jnp.ones_like(lowerCAmelCase__ , dtype=scores.dtype ) * -float("""inf""" )
__magic_name__ : Optional[int] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__magic_name__ : Optional[int] = lax.dynamic_update_slice(lowerCAmelCase__ , lowerCAmelCase__ , (0, current_token) )
return new_scores
__magic_name__ : List[Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowerCAmelCase__ ) , lambda: scores , ) , )
return scores
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
__magic_name__ : Tuple = generate_config.eos_token_id
__magic_name__ : Tuple = generate_config.no_timestamps_token_id
__magic_name__ : Dict = generate_config.no_timestamps_token_id + 1
__magic_name__ : Tuple = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCAmelCase__ , """max_initial_timestamp_index""" ):
__magic_name__ : Optional[Any] = generate_config.max_initial_timestamp_index
else:
__magic_name__ : List[str] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__magic_name__ : Optional[int] = model_config.vocab_size
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
# suppress <|notimestamps|> which is handled by without_timestamps
__magic_name__ : Optional[Any] = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : List[Any] = jnp.where((cur_len - self.begin_index) >= 1 , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : int = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowerCAmelCase__ , )
__magic_name__ : Optional[Any] = jnp.where((cur_len - self.begin_index) < 2 , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : str = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowerCAmelCase__ , lowerCAmelCase__ , )
return jnp.where(
lowerCAmelCase__ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , lowerCAmelCase__ , )
__magic_name__ : Tuple = jax.vmap(lowerCAmelCase__ )(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Any = jnp.where(cur_len == self.begin_index , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Tuple = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowerCAmelCase__ , )
__magic_name__ : Optional[Any] = self.timestamp_begin + self.max_initial_timestamp_index
__magic_name__ : int = jnp.where(
lowerCAmelCase__ , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , lowerCAmelCase__ , )
# if sum of probability over timestamps is above any other token, sample timestamp
__magic_name__ : Dict = jax.nn.log_softmax(lowerCAmelCase__ , axis=-1 )
def handle_cumulative_probs(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : List[Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__magic_name__ : Tuple = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , lowerCAmelCase__ , )
__magic_name__ : Dict = jax.vmap(lowerCAmelCase__ )(lowerCAmelCase__ , lowerCAmelCase__ )
return scores
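# A short runnable usage sketch of the processor-list pattern, using the
# public transformers exports that the classes above mirror
# (FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper):
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper

def _temperature_demo():
    input_ids = jnp.array([[0, 1]])
    scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])
    processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(temperature=2.0)])
    # Dividing by the temperature flattens the distribution: [[0.5, 1.0, 1.5, 2.0]]
    return processors(input_ids, scores, cur_len=2)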
| 342 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
__magic_name__ : int = sorted(string.lower() )
return len(_A ) == len(set(_A ) )
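# Sanity checks, assuming the predicate above is bound to `is_isogram`, as
# the __main__ block below uses ("Uncopyrightable" is a well-known
# 15-letter isogram):
#     assert is_isogram("Uncopyrightable") is True
#     assert is_isogram("allowance") is False  # 'a' and 'l' repeat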
if __name__ == "__main__":
__magic_name__: Dict = input("Enter a string ").strip()
__magic_name__: Union[str, Any] = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 342 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__: List[Any] = logging.get_logger(__name__)
__magic_name__: Any = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Any = '''trocr'''
lowercase__ : Union[str, Any] = ['''past_key_values''']
lowercase__ : Optional[Any] = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , lowerCAmelCase__=5_02_65 , lowerCAmelCase__=10_24 , lowerCAmelCase__=12 , lowerCAmelCase__=16 , lowerCAmelCase__=40_96 , lowerCAmelCase__="gelu" , lowerCAmelCase__=5_12 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , **lowerCAmelCase__ , ) -> Any:
__magic_name__ : List[str] = vocab_size
__magic_name__ : Union[str, Any] = d_model
__magic_name__ : Optional[Any] = decoder_layers
__magic_name__ : List[str] = decoder_attention_heads
__magic_name__ : int = decoder_ffn_dim
__magic_name__ : List[str] = activation_function
__magic_name__ : int = max_position_embeddings
__magic_name__ : int = dropout
__magic_name__ : Any = attention_dropout
__magic_name__ : Tuple = activation_dropout
__magic_name__ : List[Any] = init_std
__magic_name__ : str = decoder_layerdrop
__magic_name__ : Optional[int] = use_cache
__magic_name__ : str = scale_embedding
__magic_name__ : Tuple = use_learned_position_embeddings
__magic_name__ : List[Any] = layernorm_embedding
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
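# A short runnable sketch using the public transformers export of the config
# defined above (TrOCRConfig); the overridden values are illustrative.
def _trocr_config_demo():
    from transformers import TrOCRConfig

    config = TrOCRConfig(d_model=512, decoder_layers=6)
    assert config.model_type == "trocr"
    assert config.hidden_size == config.d_model  # "hidden_size" resolves via attribute_map
    return config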
| 342 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
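# Migration sketch, per the warning above (the checkpoint name is
# illustrative, not mandated by this module):
#
#     from diffusers import StableDiffusionInpaintPipeline
#     pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")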
| 342 | 1 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if not isinstance(_A, _A ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
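# A faster O(sqrt(n)) variant of the proper-divisor sum, for comparison with
# the linear scan above; both return 0 for n == 1.
def sum_of_proper_divisors(n: int) -> int:
    if n <= 0:
        raise ValueError("Input must be positive")
    total, d = (1 if n > 1 else 0), 2  # 1 divides every n > 1
    while d * d <= n:
        if n % d == 0:
            total += d
            if d != n // d:  # avoid counting a square root twice
                total += n // d
        d += 1
    return total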
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 32 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=3 , ) -> Union[str, Any]:
__magic_name__ : str = parent
__magic_name__ : Dict = do_resize
__magic_name__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_88}
__magic_name__ : Union[str, Any] = size_divisor
__magic_name__ : Union[str, Any] = do_rescale
__magic_name__ : Dict = rescale_factor
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : List[str] = do_center_crop
__magic_name__ : Tuple = image_mean
__magic_name__ : Tuple = image_std
__magic_name__ : Tuple = do_pad
__magic_name__ : int = batch_size
__magic_name__ : List[Any] = num_channels
__magic_name__ : int = min_resolution
__magic_name__ : str = max_resolution
def __magic_name__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
if not batched:
__magic_name__ : Dict = self.size["""shortest_edge"""]
__magic_name__ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__magic_name__ ,__magic_name__ : List[Any] = image.size
else:
__magic_name__ ,__magic_name__ : Dict = image.shape[1], image.shape[2]
__magic_name__ : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
__magic_name__ ,__magic_name__ : str = size, scale * w
else:
__magic_name__ ,__magic_name__ : Optional[Any] = scale * h, size
__magic_name__ : Tuple = int((13_33 / 8_00) * size )
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
__magic_name__ : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = newh * scale
__magic_name__ : Any = neww * scale
__magic_name__ ,__magic_name__ : str = int(newh + 0.5 ), int(neww + 0.5 )
__magic_name__ ,__magic_name__ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__magic_name__ : Union[str, Any] = []
for image in image_inputs:
__magic_name__ ,__magic_name__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
__magic_name__ : Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : int = BridgeTowerImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = BridgeTowerImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Any:
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )
def __magic_name__ ( self ) -> Optional[int]:
pass
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> str:
# Initialize image processor
__magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 342 | 1 |
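# The `get_expected_values` helper above mirrors a shortest-edge resize: scale
# so the short side equals `size`, cap the long side at (1333 / 800) * size,
# then floor both sides to a multiple of `size_divisor`. A standalone sketch
# (assumed to match the processor's behavior, not taken from its source):
def expected_resize(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

assert expected_resize(400, 640) == (288, 448)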
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__magic_name__: Optional[int] = logging.get_logger(__name__)
def UpperCamelCase ( _A, _A, _A, _A=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
__magic_name__ : List[str] = os.path.abspath(_A )
logger.info(f'Loading PyTorch weights from {pt_path}' )
__magic_name__ : str = torch.load(_A, map_location="""cpu""" )
logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__magic_name__ : Optional[int] = convert_pytorch_state_dict_to_flax(_A, _A )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__magic_name__ : Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(_A, _A )
return flax_state_dict
def UpperCamelCase ( _A, _A, _A, _A, ):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_A ) -> bool:
return len(set(_A ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__magic_name__ : Union[str, Any] = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__magic_name__ : List[Any] = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__magic_name__ : Tuple = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__magic_name__ : Tuple = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_A ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__magic_name__ : List[Any] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_A ):
__magic_name__ : Union[str, Any] = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__magic_name__ : Dict = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_A ):
__magic_name__ : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__magic_name__ : str = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__magic_name__ : Dict = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__magic_name__ : Optional[Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__magic_name__ : Dict = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__magic_name__ : Optional[Any] = pt_tuple_key[-2] + """_v"""
if name is not None:
__magic_name__ : Optional[int] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
__magic_name__ : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__magic_name__ : str = flax_model.params["""params"""]
else:
__magic_name__ : List[Any] = flax_model.params
__magic_name__ : Dict = flatten_dict(_A )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__magic_name__ : Union[str, Any] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_A )
__magic_name__ : Optional[Any] = {}
__magic_name__ : str = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__magic_name__ : List[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__magic_name__ : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__magic_name__ : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
__magic_name__ ,__magic_name__ : List[str] = rename_key_and_reshape_tensor(
_A, _A, _A, _A )
# add model prefix if necessary
__magic_name__ : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : List[str] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__magic_name__ : str = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A, _A )
continue
# also add unexpected weight so that warning is thrown
__magic_name__ : Optional[Any] = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
__magic_name__ : Tuple = jnp.asarray(_A )
return unflatten_dict(_A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
import torch
# Load the index
__magic_name__ : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
__magic_name__ : Optional[int] = torch.load(_A )
__magic_name__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
__magic_name__ : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__magic_name__ : List[str] = flax_model.params["""params"""]
__magic_name__ : Optional[Any] = flatten_dict(_A )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
__magic_name__ : List[str] = flax_model.params
__magic_name__ : Optional[Any] = flatten_dict(_A )
__magic_name__ : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__magic_name__ : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__magic_name__ : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__magic_name__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Optional[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
__magic_name__ ,__magic_name__ : str = rename_key_and_reshape_tensor(
_A, _A, _A, _A )
# add model prefix if necessary
__magic_name__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__magic_name__ : Optional[int] = jnp.asarray(_A )
continue
if "var" in flax_key[-1]:
__magic_name__ : str = jnp.asarray(_A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_A, _A )
continue
# also add unexpected weight so that warning is thrown
__magic_name__ : int = jnp.asarray(_A )
else:
# also add unexpected weight so that warning is thrown
__magic_name__ : List[str] = jnp.asarray(_A )
return unflatten_dict(_A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = os.path.abspath(_A )
logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__magic_name__ : Union[str, Any] = getattr(_A, """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(_A, """rb""" ) as state_f:
try:
__magic_name__ : List[Any] = from_bytes(_A, state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_A, _A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
__magic_name__ : Optional[Any] = flatten_dict(jax.tree_util.tree_map(lambda _A : x.dtype == jnp.bfloataa, _A ) ).values()
if any(_A ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
__magic_name__ : Optional[Any] = jax.tree_util.tree_map(
lambda _A : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, _A )
__magic_name__ : List[Any] = flatten_dict(_A )
__magic_name__ : Tuple = pt_model.state_dict()
__magic_name__ : Any = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
__magic_name__ : Union[str, Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__magic_name__ : int = []
__magic_name__ : List[str] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__magic_name__ : int = flax_key_tuple[0] == pt_model.base_model_prefix
__magic_name__ : Optional[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Dict = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_A ) not in pt_model_dict:
# conv layer
__magic_name__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
__magic_name__ : List[Any] = jnp.transpose(_A, (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_A ) not in pt_model_dict:
# linear layer
__magic_name__ : str = flax_key_tuple[:-1] + ("""weight""",)
__magic_name__ : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__magic_name__ : Any = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__magic_name__ : List[Any] = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
__magic_name__ : Optional[int] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
__magic_name__ : List[str] = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__magic_name__ : List[Any] = """.""".join(_A )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__magic_name__ : Optional[int] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__magic_name__ : str = key.split(""".""" )
__magic_name__ : List[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__magic_name__ : Dict = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
__magic_name__ : List[str] = key_components[-2] + """_v"""
if name is not None:
__magic_name__ : Dict = key_components[:-3] + [name]
__magic_name__ : List[str] = """.""".join(_A )
__magic_name__ : List[Any] = key
if flax_key in special_pt_names:
__magic_name__ : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__magic_name__ : int = np.asarray(_A ) if not isinstance(_A, np.ndarray ) else flax_tensor
__magic_name__ : Optional[Any] = torch.from_numpy(_A )
# remove from missing keys
missing_keys.remove(_A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_A )
pt_model.load_state_dict(_A )
# re-transform missing_keys to list
__magic_name__ : int = list(_A )
if len(_A ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_A ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"""If your task is similar to the task the model of the checkpoint was trained on, """
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 342 |
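# The renaming rules above reduce to a few tensor-layout conventions: a Linear
# `weight` becomes a transposed `kernel`, a Conv2d `weight` (OIHW) becomes an
# HWIO `kernel`, and LayerNorm `weight` becomes `scale`. A minimal numpy sketch
# of the linear/conv cases (illustrative helper, not the library function):
import numpy as np

def pt_param_to_flax(key: tuple, tensor: np.ndarray):
    if key[-1] == "weight" and tensor.ndim == 4:   # conv: OIHW -> HWIO
        return key[:-1] + ("kernel",), tensor.transpose(2, 3, 1, 0)
    if key[-1] == "weight" and tensor.ndim == 2:   # linear: transpose in/out
        return key[:-1] + ("kernel",), tensor.T
    return key, tensor

key, kernel = pt_param_to_flax(("dense", "weight"), np.zeros((8, 4)))
assert key == ("dense", "kernel") and kernel.shape == (4, 8)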
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__: Tuple = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Union[str, Any] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__magic_name__: Optional[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__magic_name__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 | 1 |
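# The `_LazyModule` above defers heavy imports until an attribute is first
# accessed. A minimal sketch of the same idea with PEP 562 module-level
# __getattr__, assuming this code lives in a package __init__.py:
import importlib

_import_structure = {"processing_clap": ["ClapProcessor"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")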
from math import isqrt
def UpperCamelCase ( _A ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2, isqrt(_A ) + 1 ) )
def UpperCamelCase ( _A = 10**6 ):
"""simple docstring"""
__magic_name__ : List[str] = 0
__magic_name__ : Any = 1
__magic_name__ : Optional[int] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 |
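# A runnable de-obfuscated sketch of the count above (illustrative names).
# Consecutive cube differences are (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, and
# the candidate advances incrementally because successive differences grow by
# 6*n.
from math import isqrt

def is_prime(number: int) -> bool:
    return number >= 2 and all(number % d != 0 for d in range(2, isqrt(number) + 1))

def solution(max_prime: int = 10**6) -> int:
    primes_count, cube_index, prime_candidate = 0, 1, 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count

print(solution())  # 173, the Project Euler 131 answer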
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__: Optional[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__magic_name__: List[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
__magic_name__ : Any = bs[:]
__magic_name__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
    __magic_name__ : List[str] = [chr(n ) for n in cs]
return dict(zip(_A, _A ) )
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : List[Any] = char
return pairs
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
__magic_name__ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__magic_name__ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
__magic_name__ : Any = {v: k for k, v in self.encoder.items()}
__magic_name__ : Tuple = errors # how to handle errors in decoding
__magic_name__ : Tuple = bytes_to_unicode()
__magic_name__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__magic_name__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : str = {}
__magic_name__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __magic_name__ ( self ) -> Optional[Any]:
return len(self.encoder )
def __magic_name__ ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if token in self.cache:
return self.cache[token]
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__magic_name__ : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : List[str] = bigram
__magic_name__ : Any = []
__magic_name__ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : str = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Optional[Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(lowerCAmelCase__ )
__magic_name__ : Optional[int] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : List[str] = get_pairs(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
__magic_name__ : str = word
return word
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : str = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__magic_name__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : Optional[Any] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : Optional[int] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
__magic_name__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : Dict = [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__magic_name__ : List[Any] = """ """ + text
return (text, kwargs)
| 342 | 1 |
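# The BPE loop above repeatedly merges the lowest-ranked adjacent symbol pair.
# A tiny standalone sketch of one merge decision on a toy rank table
# (illustrative data, not the real BART merges):
def get_pairs(word: tuple) -> set:
    return {(a, b) for a, b in zip(word, word[1:])}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}
word = ("l", "o", "w", "e", "r")
bigram = min(get_pairs(word), key=lambda pair: bpe_ranks.get(pair, float("inf")))
assert bigram == ("l", "o")  # merged first, giving ("lo", "w", "e", "r")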
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ) -> int:
super().__init__()
self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
# create a imagenet -> id dictionary for easier use
__magic_name__ : Dict = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
__magic_name__ : List[Any] = int(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = dict(sorted(self.labels.items() ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[int]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Optional[Any] = list(lowerCAmelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 50 , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
__magic_name__ : Optional[int] = len(lowerCAmelCase__ )
__magic_name__ : Any = self.transformer.config.sample_size
__magic_name__ : List[str] = self.transformer.config.in_channels
__magic_name__ : Tuple = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase__ , device=self.device , dtype=self.transformer.dtype , )
__magic_name__ : str = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__magic_name__ : List[str] = torch.tensor(lowerCAmelCase__ , device=self.device ).reshape(-1 )
__magic_name__ : List[str] = torch.tensor([10_00] * batch_size , device=self.device )
__magic_name__ : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__magic_name__ : Dict = latent_model_input[: len(lowerCAmelCase__ ) // 2]
__magic_name__ : Union[str, Any] = torch.cat([half, half] , dim=0 )
__magic_name__ : Dict = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = t
if not torch.is_tensor(lowerCAmelCase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__magic_name__ : Optional[int] = latent_model_input.device.type == """mps"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Tuple = torch.floataa if is_mps else torch.floataa
else:
__magic_name__ : Optional[Any] = torch.intaa if is_mps else torch.intaa
__magic_name__ : List[str] = torch.tensor([timesteps] , dtype=lowerCAmelCase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__magic_name__ : int = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__magic_name__ : Any = self.transformer(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).sample
# perform guidance
if guidance_scale > 1:
__magic_name__ ,__magic_name__ : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__magic_name__ ,__magic_name__ : Dict = torch.split(lowerCAmelCase__ , len(lowerCAmelCase__ ) // 2 , dim=0 )
__magic_name__ : List[Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__magic_name__ : Optional[Any] = torch.cat([half_eps, half_eps] , dim=0 )
__magic_name__ : List[str] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__magic_name__ ,__magic_name__ : Any = torch.split(lowerCAmelCase__ , lowerCAmelCase__ , dim=1 )
else:
__magic_name__ : Tuple = noise_pred
# compute previous image: x_t -> x_t-1
__magic_name__ : str = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
if guidance_scale > 1:
__magic_name__ ,__magic_name__ : Any = latent_model_input.chunk(2 , dim=0 )
else:
__magic_name__ : Optional[Any] = latent_model_input
__magic_name__ : Optional[Any] = 1 / self.vae.config.scaling_factor * latents
__magic_name__ : Union[str, Any] = self.vae.decode(lowerCAmelCase__ ).sample
__magic_name__ : List[Any] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__magic_name__ : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__magic_name__ : Dict = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
| 342 |
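# The guidance step above is standard classifier-free guidance: the batch
# carries conditional and null-class halves, and the noise estimate is
# extrapolated as eps = eps_uncond + s * (eps_cond - eps_uncond). A minimal
# tensor sketch of that extrapolation:
import torch

def classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    cond_eps, uncond_eps = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)

eps = classifier_free_guidance(torch.randn(4, 3, 8, 8), guidance_scale=4.0)
assert eps.shape == (4, 3, 8, 8)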
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[32, 64, 1_28] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ) -> str:
__magic_name__ : Optional[int] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Optional[int] = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : str = embed_dim
__magic_name__ : int = hidden_sizes
__magic_name__ : Union[str, Any] = depths
__magic_name__ : List[str] = num_heads
__magic_name__ : str = window_size
__magic_name__ : Optional[Any] = mlp_ratio
__magic_name__ : Dict = qkv_bias
__magic_name__ : Dict = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : int = use_absolute_embeddings
__magic_name__ : Dict = patch_norm
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[int] = is_training
__magic_name__ : Optional[Any] = scope
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = encoder_stride
__magic_name__ : List[Any] = out_features
__magic_name__ : Union[str, Any] = out_indices
def __magic_name__ ( self ) -> str:
__magic_name__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Any = FocalNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : int = self.type_sequence_label_size
__magic_name__ : Tuple = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> int:
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
__magic_name__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : Any = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Dict = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = FocalNetModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> List[str]:
return
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : str = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# FocalNet has a different seq_length
__magic_name__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
__magic_name__ : Union[str, Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__magic_name__ : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = 3
__magic_name__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : str = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = FocalNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.default_image_processor
__magic_name__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__magic_name__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__ : Optional[int] = FocalNetConfig
lowercase__ : Dict = False
def __magic_name__ ( self ) -> int:
__magic_name__ : Dict = FocalNetModelTester(self )
| 342 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = CodeGenTokenizer
lowercase__ : int = CodeGenTokenizerFast
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = {'''add_prefix_space''': True}
lowercase__ : Dict = False
def __magic_name__ ( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__magic_name__ : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
__magic_name__ : Optional[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__magic_name__ : Union[str, Any] = {"""unk_token""": """<unk>"""}
__magic_name__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase__ ) )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
__magic_name__ : Tuple = """lower newer"""
__magic_name__ : Optional[int] = """lower newer"""
return input_text, output_text
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Union[str, Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__magic_name__ : List[str] = """lower newer"""
__magic_name__ : Any = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__magic_name__ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Tuple = tokens + [tokenizer.unk_token]
__magic_name__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
if not self.test_rust_tokenizer:
return
__magic_name__ : Tuple = self.get_tokenizer()
__magic_name__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
__magic_name__ : Dict = """lower newer"""
# Testing tokenization
__magic_name__ : Tuple = tokenizer.tokenize(lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
__magic_name__ : int = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing conversion to ids without special tokens
__magic_name__ : Optional[int] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing conversion to ids with special tokens
__magic_name__ : List[Any] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
__magic_name__ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing the unknown token
__magic_name__ : Dict = tokens + [rust_tokenizer.unk_token]
__magic_name__ : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __magic_name__ ( self , lowerCAmelCase__=15 ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__magic_name__ : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
# Simple input
__magic_name__ : List[Any] = """This is a simple input"""
__magic_name__ : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
__magic_name__ : Union[str, Any] = ("""This is a simple input""", """This is a pair""")
__magic_name__ : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" )
# Simple input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" )
# Pair input
self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" , )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
__magic_name__ : List[str] = """This is a simple input"""
__magic_name__ : List[str] = ["""This is a simple input looooooooong""", """This is a simple input"""]
__magic_name__ : List[Any] = ("""This is a simple input""", """This is a pair""")
__magic_name__ : Dict = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
__magic_name__ : Tuple = tokenizer.pad_token_id
__magic_name__ : List[Any] = tokenizer(lowerCAmelCase__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
        __magic_name__ : int = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="""np""" )
__magic_name__ : Tuple = tokenizer(*lowerCAmelCase__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
        __magic_name__ : Tuple = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Dict = """$$$"""
__magic_name__ : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCAmelCase__ , add_bos_token=lowerCAmelCase__ )
__magic_name__ : Optional[int] = """This is a simple input"""
__magic_name__ : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
__magic_name__ : str = tokenizer.bos_token_id
__magic_name__ : Tuple = tokenizer(lowerCAmelCase__ )
__magic_name__ : int = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] , lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__magic_name__ : Dict = tokenizer.decode(out_s.input_ids )
__magic_name__ : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Union[str, Any] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
__magic_name__ : Optional[Any] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
__magic_name__ : Dict = """\nif len_a > len_b: result = a\nelse: result = b"""
__magic_name__ : Any = tokenizer.encode(lowerCAmelCase__ )
__magic_name__ : Tuple = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
__magic_name__ : List[Any] = tokenizer.decode(lowerCAmelCase__ , truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
pass
| 342 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]:
__magic_name__ : int = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = image_size
__magic_name__ : str = num_channels
__magic_name__ : Dict = patch_size
__magic_name__ : Tuple = num_frames
__magic_name__ : List[Any] = is_training
__magic_name__ : List[Any] = use_labels
__magic_name__ : Dict = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Tuple = attention_type
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[Any] = scope
__magic_name__ : Tuple = num_labels
        # in TimeSformer, the total number of tokens equals num_frames * num_patches per frame + 1 CLS token
        __magic_name__ : str = (image_size // patch_size) ** 2
        __magic_name__ : Any = num_frames * self.num_patches_per_frame + 1
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> str:
__magic_name__ : Dict = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__magic_name__ : Optional[Any] = self.num_labels
return config
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
__magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
# verify the logits shape
__magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs
__magic_name__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Tuple = False
lowercase__ : Any = False
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : List[Any] = TimesformerModelTester(self )
__magic_name__ : List[str] = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]:
__magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Optional[int] = [*signature.parameters.keys()]
__magic_name__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Optional[int]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
if not self.has_attentions:
pass
else:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
for model_class in self.all_model_classes:
__magic_name__ : Tuple = self.model_tester.seq_length
__magic_name__ : int = self.model_tester.num_frames
__magic_name__ : Any = True
__magic_name__ : Tuple = False
__magic_name__ : Optional[int] = True
__magic_name__ : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : List[str] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[Any] = True
__magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : int = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__magic_name__ : Union[str, Any] = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : Optional[Any] = True
__magic_name__ : int = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
__magic_name__ : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self ) -> Any:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" )
__magic_name__ : List[str] = np.load(_A )
return list(_A )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowerCAmelCase__ )
__magic_name__ : str = self.default_image_processor
__magic_name__ : Any = prepare_video()
__magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 342 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__magic_name__: str = get_logger(__name__)
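# The helpers below cover the three FSDP StateDictType layouts:
#   FULL_STATE_DICT    - rank 0 gathers the full, un-sharded state dict into one file
#   LOCAL_STATE_DICT   - every rank writes its own flattened shard to a per-rank file
#   SHARDED_STATE_DICT - ranks save cooperatively via torch.distributed.checkpoint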
def UpperCamelCase ( _A, _A, _A, _A, _A=0 ):
"""simple docstring"""
os.makedirs(_A, exist_ok=_A )
with FSDP.state_dict_type(
_A, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
__magic_name__ : Tuple = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__magic_name__ : Dict = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__magic_name__ : Tuple = os.path.join(_A, _A )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(_A, _A )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__magic_name__ : Union[str, Any] = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__magic_name__ : List[str] = os.path.join(_A, _A )
logger.info(f'Saving model to {output_model_file}' )
torch.save(_A, _A )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__magic_name__ : Tuple = os.path.join(_A, f'{MODEL_NAME}_{model_index}' )
os.makedirs(_A, exist_ok=_A )
logger.info(f'Saving model to {ckpt_dir}' )
__magic_name__ : int = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=_A, storage_writer=dist_cp.FileSystemWriter(_A ), planner=DefaultSavePlanner(), )
logger.info(f'Model saved to {ckpt_dir}' )
def UpperCamelCase ( _A, _A, _A, _A, _A=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_A, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_A ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__magic_name__ : str = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__magic_name__ : Optional[Any] = os.path.join(_A, _A )
logger.info(f'Loading model from {input_model_file}' )
__magic_name__ : Optional[int] = torch.load(_A )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__magic_name__ : str = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__magic_name__ : Optional[Any] = os.path.join(_A, _A )
logger.info(f'Loading model from {input_model_file}' )
__magic_name__ : str = torch.load(_A )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__magic_name__ : str = (
os.path.join(_A, f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__magic_name__ : Tuple = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_A, storage_reader=dist_cp.FileSystemReader(_A ), planner=DefaultLoadPlanner(), )
__magic_name__ : List[str] = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(_A )
def UpperCamelCase ( _A, _A, _A, _A, _A, _A=0 ):
"""simple docstring"""
os.makedirs(_A, exist_ok=_A )
with FSDP.state_dict_type(
_A, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
__magic_name__ : Any = FSDP.optim_state_dict(_A, _A )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__magic_name__ : List[str] = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__magic_name__ : int = os.path.join(_A, _A )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(_A, _A )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__magic_name__ : Optional[int] = os.path.join(_A, f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(_A, exist_ok=_A )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state}, storage_writer=dist_cp.FileSystemWriter(_A ), planner=DefaultSavePlanner(), )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def UpperCamelCase ( _A, _A, _A, _A, _A, _A=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_A, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__magic_name__ : List[Any] = None
            # the below check should work but currently it isn't working (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__magic_name__ : List[Any] = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__magic_name__ : List[str] = os.path.join(_A, _A )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__magic_name__ : List[Any] = torch.load(_A )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__magic_name__ : List[str] = (
os.path.join(_A, f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__magic_name__ : List[str] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key="""optimizer""", storage_reader=dist_cp.FileSystemReader(_A ), )
__magic_name__ : Optional[Any] = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__magic_name__ : List[str] = FSDP.optim_state_dict_to_load(_A, _A, _A )
optimizer.load_state_dict(_A )
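# A minimal checkpointing sketch (assuming the upstream Accelerate names
# save_fsdp_model / save_fsdp_optimizer for the two save helpers above):
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)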
| 342 |
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = [0] * len(_A )
__magic_name__ : List[str] = []
__magic_name__ : List[str] = [1] * len(_A )
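    # Kahn's algorithm: repeatedly dequeue zero-indegree vertices and relax
    # long_dist, the number of vertices on the longest path ending at each node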
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_A ) ):
if indegree[i] == 0:
queue.append(_A )
while queue:
__magic_name__ : Dict = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__magic_name__ : int = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_A )
print(max(_A ) )
# Adjacency list of Graph
__magic_name__: str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
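# For the sample DAG above the longest chain is 0 -> 2 -> 5 -> 6 -> 7 (5 vertices),
# so this prints 5.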
| 342 | 1 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
__magic_name__ : int = [True] * (num + 1)
__magic_name__ : Optional[int] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p, num + 1, _A ):
__magic_name__ : List[str] = False
p += 1
return [prime for prime in range(2, num + 1 ) if primes[prime]]
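# Worked example: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].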
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__: Union[str, Any] = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 342 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> str:
__magic_name__ : Tuple = """ylacombe/bark-small"""
__magic_name__ : List[str] = tempfile.mkdtemp()
__magic_name__ : Optional[Any] = """en_speaker_1"""
__magic_name__ : Union[str, Any] = """This is a test string"""
__magic_name__ : Optional[int] = """speaker_embeddings_path.json"""
__magic_name__ : Any = """speaker_embeddings"""
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[Any] = self.get_tokenizer()
__magic_name__ : int = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__magic_name__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ : str = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__ ( self ) -> Any:
__magic_name__ : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__magic_name__ : Union[str, Any] = 35
__magic_name__ : List[Any] = 2
__magic_name__ : Dict = 8
__magic_name__ : Tuple = {
"""semantic_prompt""": np.ones(lowerCAmelCase__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__magic_name__ : Optional[int] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__magic_name__ : Dict = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__magic_name__ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : str = self.get_tokenizer()
__magic_name__ : Dict = BarkProcessor(tokenizer=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string )
__magic_name__ : List[Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 342 | 1 |
import random
def UpperCamelCase ( _A, _A, _A = False ):
"""simple docstring"""
__magic_name__ : dict = {i: [] for i in range(_A )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_A )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is smaller than the given probability
for i in range(_A ):
for j in range(i + 1, _A ):
if random.random() < probability:
graph[i].append(_A )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_A )
return graph
def UpperCamelCase ( _A ):
"""simple docstring"""
return {
i: [j for j in range(_A ) if i != j] for i in range(_A )
}
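# Worked examples (the original function names random_graph/complete_graph are assumed):
#   complete_graph(3) -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#   random_graph(4, 0.5) is nondeterministic; call random.seed(...) first for
#   reproducible edges.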
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ) -> Optional[int]:
__magic_name__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
__magic_name__ : str = parent
__magic_name__ : Any = batch_size
__magic_name__ : Any = num_channels
__magic_name__ : List[str] = image_size
__magic_name__ : Tuple = min_resolution
__magic_name__ : Union[str, Any] = max_resolution
__magic_name__ : List[str] = do_resize
__magic_name__ : Optional[Any] = size
__magic_name__ : Optional[Any] = do_normalize
__magic_name__ : Any = image_mean
__magic_name__ : List[str] = image_std
def __magic_name__ ( self ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Dict = DPTImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__magic_name__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __magic_name__ ( self ) -> str:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : int = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 342 | 1 |
from __future__ import annotations
import math
def UpperCamelCase ( _A ):
"""simple docstring"""
if num <= 0:
__magic_name__ : Any = f'{num}: Invalid input, please enter a positive integer.'
raise ValueError(_A )
__magic_name__ : Dict = [True] * (num + 1)
__magic_name__ : List[str] = []
__magic_name__ : Any = 2
__magic_name__ : str = int(math.sqrt(_A ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_A )
# Set multiples of start be False
for i in range(start * start, num + 1, _A ):
if sieve[i] is True:
__magic_name__ : Optional[int] = False
start += 1
for j in range(end + 1, num + 1 ):
if sieve[j] is True:
prime.append(_A )
return prime
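# Worked example: prime_sieve(25) returns [2, 3, 5, 7, 11, 13, 17, 19, 23].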
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 342 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__magic_name__: Tuple = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : List[str] = '''facebook/nllb-200-distilled-600M'''
lowercase__ : List[Any] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
    '''which should be the language for the desired output. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
lowercase__ : List[str] = '''translator'''
lowercase__ : Optional[Any] = AutoTokenizer
lowercase__ : int = AutoModelForSeqaSeqLM
lowercase__ : List[Any] = LANGUAGE_CODES
lowercase__ : str = ['''text''', '''text''', '''text''']
lowercase__ : Any = ['''text''']
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
__magic_name__ : Tuple = self.lang_to_code[src_lang]
__magic_name__ : Dict = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCAmelCase__ , return_tensors="""pt""" , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.model.generate(**lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase__ )
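# A minimal usage sketch (assuming the upstream class name TranslationTool and the
# Transformers agents tool-calling convention):
#   translator = TranslationTool()
#   print(translator("Hello, how are you?", src_lang="English", tgt_lang="French"))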
| 342 | 1 |
import re
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(_A, _A ):
return match.string == phone
return False
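# Spot checks against the pattern above:
#   indian_phone_validator("+918827897895") -> True
#   indian_phone_validator("9876543210")    -> True   (starts with [789], 10 digits)
#   indian_phone_validator("1234567890")    -> False  (leading digit not in [789])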
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 342 |
import math
class snake_case__ :
def __init__( self , lowerCAmelCase__=0 ) -> Optional[int]: # a graph with Node 0,1,...,N-1
__magic_name__ : Tuple = n
__magic_name__ : Union[str, Any] = [
[math.inf for j in range(0 , lowerCAmelCase__ )] for i in range(0 , lowerCAmelCase__ )
] # adjacency matrix for weight
__magic_name__ : List[Any] = [
[math.inf for j in range(0 , lowerCAmelCase__ )] for i in range(0 , lowerCAmelCase__ )
] # dp[i][j] stores minimum distance from i to j
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : Dict = w
def __magic_name__ ( self ) -> Optional[int]:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__magic_name__ : Optional[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
return self.dp[u][v]
if __name__ == "__main__":
__magic_name__: Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
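# With the edges above, the all-pairs shortest paths give:
#   show_min(1, 4) -> 11  (1 -> 3 -> 4: 5 + 6)
#   show_min(0, 3) -> 16  (0 -> 2 -> 3: 9 + 7)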
| 342 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = CanineTokenizer
lowercase__ : Any = False
def __magic_name__ ( self ) -> int:
super().setUp()
__magic_name__ : Optional[Any] = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__ ( self ) -> Any:
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> CanineTokenizer:
__magic_name__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
__magic_name__ : int = 10_24
return tokenizer
@require_torch
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : List[Any] = self.canine_tokenizer
__magic_name__ : Optional[int] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__magic_name__ : List[str] = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__magic_name__ : List[Any] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Tuple = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.canine_tokenizer
__magic_name__ : Optional[int] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__magic_name__ : Tuple = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowerCAmelCase__ )
self.assertIn("""attention_mask""" , lowerCAmelCase__ )
self.assertIn("""token_type_ids""" , lowerCAmelCase__ )
@require_torch
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = self.canine_tokenizer
__magic_name__ : Dict = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__magic_name__ : Union[str, Any] = tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding="""max_length""" , truncation=lowerCAmelCase__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def __magic_name__ ( self ) -> int:
# safety check on max_len default value so we are sure the test works
__magic_name__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Union[str, Any] = """ He is very happy, UNwant\u00E9d,running"""
__magic_name__ : Any = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__magic_name__ : str = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__magic_name__ : int = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
__magic_name__ : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Dict = """ He is very happy, UNwant\u00E9d,running"""
__magic_name__ : Dict = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
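                # (CANINE tokenizes raw Unicode code points, so new special tokens
                # are drawn from the private-use area starting at U+E000)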
__magic_name__ : int = chr(0Xe0_07 )
additional_special_tokens.append(lowerCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__magic_name__ : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__magic_name__ : Dict = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIn(lowerCAmelCase__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase__ )
def __magic_name__ ( self ) -> int:
__magic_name__ : Optional[int] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ ,__magic_name__ : str = self.get_clean_sequence(lowerCAmelCase__ )
# a special token for Canine can be defined as follows:
__magic_name__ : Tuple = 0Xe0_05
__magic_name__ : List[str] = chr(lowerCAmelCase__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
__magic_name__ : Optional[int] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase__ )
__magic_name__ : int = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__magic_name__ : Optional[int] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__magic_name__ : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , input_encoded + special_token_id )
__magic_name__ : Dict = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Dict = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Tuple = chr(0Xe0_05 )
__magic_name__ : int = chr(0Xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__magic_name__ : Optional[int] = tokenizer.tokenize(lowerCAmelCase__ )
__magic_name__ : str = tokenizer.tokenize(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
self.assertEqual(token_a[0] , lowerCAmelCase__ )
self.assertEqual(token_a[0] , lowerCAmelCase__ )
@require_tokenizers
def __magic_name__ ( self ) -> int:
__magic_name__ : Optional[int] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
__magic_name__ : Any = 0Xe0_06
__magic_name__ : Tuple = chr(lowerCAmelCase__ )
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase__ )
tokenizer.from_pretrained(lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ : int = json.load(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
# a special token for Canine can be defined as follows:
__magic_name__ : Dict = 0Xe0_06
__magic_name__ : Dict = chr(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = [new_token_a]
__magic_name__ : str = [new_token_a]
with open(os.path.join(lowerCAmelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : int = tokenizer_class.from_pretrained(lowerCAmelCase__ , extra_ids=0 )
self.assertIn(lowerCAmelCase__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__magic_name__ : str = 0Xe0_07
__magic_name__ : Any = chr(lowerCAmelCase__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : Optional[int] = [AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ )]
__magic_name__ : List[Any] = tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , extra_ids=0 )
self.assertIn(lowerCAmelCase__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : List[Any] = """hello world"""
if self.space_between_special_tokens:
__magic_name__ : str = """[CLS] hello world [SEP]"""
else:
__magic_name__ : str = input
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__magic_name__ : Dict = tokenizer.decode(lowerCAmelCase__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase__ , [output, output.lower()] )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Tuple = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__magic_name__ : List[Any] = """a"""
__magic_name__ : List[str] = ord(lowerCAmelCase__ )
for attr in attributes_list:
setattr(lowerCAmelCase__ , attr + """_id""" , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + """_id""" ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , attr + """_id""" , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + """_id""" ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens_ids""" ) , [] )
__magic_name__ : Dict = 0Xe0_06
__magic_name__ : List[Any] = chr(lowerCAmelCase__ )
setattr(lowerCAmelCase__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def __magic_name__ ( self ) -> List[str]:
pass
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> List[str]:
pass
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> int:
pass
def __magic_name__ ( self ) -> Optional[Any]:
pass
def __magic_name__ ( self ) -> Dict:
pass
def __magic_name__ ( self ) -> Union[str, Any]:
pass
| 342 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class snake_case__ :
def __init__( self , lowerCAmelCase__ = None ) -> None:
if components is None:
__magic_name__ : Any = []
__magic_name__ : List[str] = list(lowerCAmelCase__ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"
def __add__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : Dict = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : int = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> float:
...
def __mul__( self , lowerCAmelCase__ ) -> float | Vector:
if isinstance(lowerCAmelCase__ , (float, int) ):
__magic_name__ : Optional[Any] = [c * other for c in self.__components]
return Vector(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
__magic_name__ : Optional[Any] = len(self )
__magic_name__ : List[Any] = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return sum(lowerCAmelCase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __magic_name__ ( self ) -> Vector:
return Vector(self.__components )
def __magic_name__ ( self , lowerCAmelCase__ ) -> float:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__magic_name__ : Optional[int] = value
def __magic_name__ ( self ) -> float:
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__magic_name__ : Dict = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase__ ) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> float:
__magic_name__ : Optional[Any] = self * other
__magic_name__ : List[str] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
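# Illustrative usage of the Vector class (hypothetical, with readable names):
#   v = Vector([3.0, 4.0]); w = Vector([1.0, 2.0])
#   v + w                 -> Vector with components (4.0, 6.0)
#   v * 2                 -> Vector with components (6.0, 8.0)  (scalar multiply)
#   v * w                 -> 11.0                               (dot product: 3*1 + 4*2)
#   v.euclidean_length()  -> 5.0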
def UpperCamelCase ( _A ):
"""simple docstring"""
assert isinstance(_A, _A )
return Vector([0] * dimension )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
assert isinstance(_A, _A ) and (isinstance(_A, _A ))
__magic_name__ : Union[str, Any] = [0] * dimension
__magic_name__ : Optional[int] = 1
return Vector(_A )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
assert (
isinstance(_A, _A )
and isinstance(_A, _A )
and (isinstance(_A, (int, float) ))
)
return x * scalar + y
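# This computes scalar * x + y, the classic BLAS-style "axpy" update; e.g.
# with scalar=2, x=Vector([1, 0]), y=Vector([0, 1]) the result would have
# components (2, 1).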
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
random.seed(_A )
__magic_name__ : Union[str, Any] = [random.randint(_A, _A ) for _ in range(_A )]
return Vector(_A )
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : Dict = matrix
__magic_name__ : Tuple = w
__magic_name__ : Union[str, Any] = h
def __str__( self ) -> str:
__magic_name__ : Dict = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__magic_name__ : Tuple = []
for i in range(self.__height ):
__magic_name__ : Tuple = [
self.__matrix[i][j] + other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__magic_name__ : Optional[Any] = []
for i in range(self.__height ):
__magic_name__ : int = [
self.__matrix[i][j] - other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Matrix:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
def __mul__( self , lowerCAmelCase__ ) -> Vector | Matrix:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # matrix-vector
if len(lowerCAmelCase__ ) == self.__width:
__magic_name__ : Tuple = zero_vector(self.__height )
for i in range(self.__height ):
__magic_name__ : Optional[int] = [
self.__matrix[i][j] * other.component(lowerCAmelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase__ , sum(lowerCAmelCase__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCAmelCase__ , (int, float) ): # matrix-scalar
__magic_name__ : Any = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
return None
def __magic_name__ ( self ) -> int:
return self.__height
def __magic_name__ ( self ) -> int:
return self.__width
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__magic_name__ : List[Any] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
__magic_name__ : Optional[int] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase__ ) ):
__magic_name__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise Exception("""Indices out of bounds""" )
def __magic_name__ ( self ) -> float:
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__magic_name__ : str = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase__ ) for y in range(self.__width )
]
return sum(lowerCAmelCase__ )
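# Determinant sketch: Laplace (cofactor) expansion along the first row,
# det(A) = sum_y A[0][y] * (-1)**y * minor(0, y), recursing until the 2x2
# base case; e.g. for [[1, 2], [3, 4]] that base case gives 1*4 - 2*3 = -2.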
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : list[list[float]] = [[0] * n for _ in range(_A )]
return Matrix(_A, _A, _A )
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
random.seed(_A )
__magic_name__ : list[list[float]] = [
[random.randint(_A, _A ) for _ in range(_A )] for _ in range(_A )
]
return Matrix(_A, _A, _A )
| 342 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__magic_name__: List[str] = logging.get_logger(__name__)
@dataclass
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[Any] = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **lowerCAmelCase__ ) -> Optional[int]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__magic_name__ : Optional[Any] = deprecated_arg[3:]
__magic_name__ : Union[str, Any] = not kwargs.pop(lowerCAmelCase__ )
logger.warning(
F'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
__magic_name__ : Optional[int] = kwargs.pop("""tpu_name""" , self.tpu_name )
__magic_name__ : Dict = kwargs.pop("""device_idx""" , self.device_idx )
__magic_name__ : List[str] = kwargs.pop("""eager_mode""" , self.eager_mode )
__magic_name__ : int = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**lowerCAmelCase__ )
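# Sketch of the intent of the loop above: a deprecated flag such as
# `no_cuda=True` is meant to be rewritten to its positive counterpart
# (`cuda=False`) before the parent __init__ consumes the remaining kwargs;
# the flag names used here are illustrative.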
lowercase__ : str = field(
default=_lowerCAmelCase , metadata={'''help''': '''Name of TPU'''} , )
lowercase__ : int = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
lowercase__ : bool = field(default=_lowerCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} )
lowercase__ : bool = field(
default=_lowerCAmelCase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def __magic_name__ ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
__magic_name__ : Optional[Any] = None
if self.tpu:
try:
if self.tpu_name:
__magic_name__ : int = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__magic_name__ : int = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__magic_name__ : Any = None
return tpu
@cached_property
def __magic_name__ ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__magic_name__ : str = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# multi-GPU is currently not supported
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
__magic_name__ : Any = tf.distribute.OneDeviceStrategy(device=F'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
__magic_name__ : Optional[int] = tf.distribute.OneDeviceStrategy(device=F'/cpu:{self.device_idx}' )
return strategy
@property
def __magic_name__ ( self ) -> bool:
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def __magic_name__ ( self ) -> "tf.distribute.Strategy":
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def __magic_name__ ( self ) -> Optional[Any]:
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def __magic_name__ ( self ) -> int:
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __magic_name__ ( self ) -> bool:
return self.n_gpu > 0
| 342 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__magic_name__: str = logging.get_logger(__name__)
__magic_name__: int = "▁"
__magic_name__: List[str] = {"vocab_file": "sentencepiece.bpe.model"}
__magic_name__: List[str] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
__magic_name__: Tuple = {
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = ['''input_ids''', '''attention_mask''']
lowercase__ : List[int] = []
lowercase__ : List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
# Mask token behaves like a normal word, i.e. it includes the space before it
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
__magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__ : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__magic_name__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
__magic_name__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__magic_name__ : List[Any] = 1
__magic_name__ : Dict = len(self.sp_model )
__magic_name__ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
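# Example of the offset arithmetic (hypothetical sizes): with an sp_model of
# 1000 pieces and fairseq_offset == 1, the first language code in the list
# ("ace_Arab", i == 0) would be assigned id 1000 + 0 + 1 == 1001.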
__magic_name__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__magic_name__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__magic_name__ : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__magic_name__ : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Any = self.lang_code_to_id[self._src_lang]
__magic_name__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Any:
__magic_name__ : List[Any] = self.__dict__.copy()
__magic_name__ : int = None
__magic_name__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : Any = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __magic_name__ ( self ) -> str:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
__magic_name__ : Optional[int] = [1] * len(self.prefix_tokens )
__magic_name__ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__ : Dict = src_lang
__magic_name__ : List[Any] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Tuple = tgt_lang_id
return inputs
def __magic_name__ ( self ) -> int:
__magic_name__ : str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__magic_name__ : List[str] = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
__magic_name__ : List[str] = src_lang
__magic_name__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : str = [self.cur_lang_code]
__magic_name__ : List[Any] = [self.eos_token_id]
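# Sketch of the resulting single-sequence layouts (prefix_tokens + ids +
# suffix_tokens): with legacy_behaviour the sequence is built as
# `tokens [eos] [src_lang_code]`, otherwise as `[src_lang_code] tokens [eos]`.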
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] = [self.cur_lang_code]
__magic_name__ : Union[str, Any] = [self.eos_token_id]
| 342 | 1 |
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = [1]
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = 0, 0, 0
__magic_name__ : List[Any] = ugly_nums[ia] * 2
__magic_name__ : Dict = ugly_nums[ia] * 3
__magic_name__ : str = ugly_nums[ia] * 5
for _ in range(1, _A ):
__magic_name__ : int = min(_A, _A, _A )
ugly_nums.append(_A )
if next_num == next_a:
ia += 1
__magic_name__ : str = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__magic_name__ : int = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__magic_name__ : Optional[Any] = ugly_nums[ia] * 5
return ugly_nums[-1]
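# Ugly numbers are those of the form 2^i * 3^j * 5^k: 1, 2, 3, 4, 5, 6, 8, 9,
# 10, 12, ... Three pointers advance through the list so the candidates 2*u,
# 3*u and 5*u are merged in sorted order without duplicates; e.g. the 10th
# ugly number is 12.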
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
| 342 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = MobileBertConfig.from_json_file(_A )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ : Tuple = MobileBertForPreTraining(_A )
# Load weights from tf checkpoint
__magic_name__ : int = load_tf_weights_in_mobilebert(_A, _A, _A )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict(), _A )
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
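    # Example invocation (script name and paths are hypothetical):
    #   python convert_mobilebert_checkpoint.py \
    #       --tf_checkpoint_path ./mobilebert/mobilebert_model.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./mobilebert/pytorch_model.bin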
| 342 | 1 |
from __future__ import annotations
import math
__magic_name__: str = "2020.9.26"
__magic_name__: Any = "xcodz-dot, cclaus, dhruvmanila"
def UpperCamelCase ( _A, _A, _A, _A, _A ):
"""simple docstring"""
if not all(isinstance(_A, (float, int) ) for val in locals().values() ):
__magic_name__ : str = f'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(_A )
__magic_name__ : List[Any] = ((x * distance) / (z + distance)) * scale
__magic_name__ : Optional[Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
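# Perspective projection sketch: by similar triangles, a point (x, y, z)
# viewed from `distance` lands at x' = x * distance / (z + distance), then is
# scaled; e.g. with distance=10.0 and scale=1.0, the point (1, 2, 3) projects
# to roughly (0.769, 1.538).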
def UpperCamelCase ( _A, _A, _A, _A, _A ):
"""simple docstring"""
if not isinstance(_A, _A ):
raise TypeError("""Axis must be a str""" )
__magic_name__ : Any = locals()
del input_variables["axis"]
if not all(isinstance(_A, (float, int) ) for val in input_variables.values() ):
__magic_name__ : Any = (
"""Input values except axis must either be float or int: """
f'{list(input_variables.values() )}'
)
raise TypeError(_A )
__magic_name__ : Union[str, Any] = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
__magic_name__ : Any = x * math.cos(_A ) - y * math.sin(_A )
__magic_name__ : Tuple = y * math.cos(_A ) + x * math.sin(_A )
__magic_name__ : Optional[Any] = z
elif axis == "x":
__magic_name__ : Union[str, Any] = y * math.cos(_A ) - z * math.sin(_A )
__magic_name__ : Optional[Any] = z * math.cos(_A ) + y * math.sin(_A )
__magic_name__ : List[str] = x
elif axis == "y":
__magic_name__ : Tuple = x * math.cos(_A ) - z * math.sin(_A )
__magic_name__ : List[Any] = z * math.cos(_A ) + x * math.sin(_A )
__magic_name__ : Optional[int] = y
else:
raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
return new_x, new_y, new_z
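# Rotation sketch: each branch applies a 2-D rotation in the plane
# perpendicular to the chosen axis (e.g. for axis "z": x' = x*cos(a) - y*sin(a),
# y' = y*cos(a) + x*sin(a), z unchanged). Note that the angle conversion above,
# (angle % 360) / 450 * 180 / pi, is this module's own convention and differs
# from the usual degrees-to-radians factor of pi / 180.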
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 342 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Optional[Any] = MgpstrTokenizer
lowercase__ : int = False
lowercase__ : Any = {}
lowercase__ : Optional[int] = False
def __magic_name__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
__magic_name__ : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__magic_name__ : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Optional[int]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : List[str] = """tester"""
__magic_name__ : int = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Dict = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ : List[str] = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
__magic_name__ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ ,__magic_name__ : Optional[Any] = self.get_input_output_texts(lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
__magic_name__ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertNotEqual(len(lowerCAmelCase__ ) , 0 )
__magic_name__ : Optional[int] = tokenizer.decode(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase__ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __magic_name__ ( self ) -> Tuple:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def __magic_name__ ( self ) -> Optional[Any]:
pass
| 342 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__: str = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__magic_name__: Union[str, Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Optional[Any] = state_dict.pop(_A )
__magic_name__ : str = val
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__magic_name__ : int = key.replace("""backbone.0.body""", """backbone.conv_encoder.model""" )
__magic_name__ : Tuple = value
else:
__magic_name__ : Dict = value
return new_state_dict
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__magic_name__ : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
__magic_name__ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : Optional[int] = in_proj_weight[:256, :]
__magic_name__ : Optional[Any] = in_proj_bias[:256]
__magic_name__ : Union[str, Any] = in_proj_weight[256:512, :]
__magic_name__ : List[str] = in_proj_bias[256:512]
__magic_name__ : List[Any] = in_proj_weight[-256:, :]
__magic_name__ : Tuple = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__magic_name__ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
__magic_name__ : List[str] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : List[Any] = in_proj_weight[:256, :]
__magic_name__ : List[Any] = in_proj_bias[:256]
__magic_name__ : List[str] = in_proj_weight[256:512, :]
__magic_name__ : List[str] = in_proj_bias[256:512]
__magic_name__ : Dict = in_proj_weight[-256:, :]
__magic_name__ : int = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__magic_name__ : Tuple = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
__magic_name__ : int = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__magic_name__ : str = in_proj_weight_cross_attn[:256, :]
__magic_name__ : int = in_proj_bias_cross_attn[:256]
__magic_name__ : List[str] = in_proj_weight_cross_attn[256:512, :]
__magic_name__ : List[str] = in_proj_bias_cross_attn[256:512]
__magic_name__ : int = in_proj_weight_cross_attn[-256:, :]
__magic_name__ : Optional[Any] = in_proj_bias_cross_attn[-256:]
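# Sketch of the slicing above: PyTorch's nn.MultiheadAttention packs the
# query/key/value projections into a single in_proj_weight of shape
# (3 * d_model, d_model); with d_model == 256 here, rows [:256], [256:512]
# and [-256:] recover the q, k and v weights (same slicing for the bias).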
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ ,__magic_name__ : Optional[int] = image.size
__magic_name__ : int = max(_A, _A )
__magic_name__ : Union[str, Any] = 800 if """detection""" in checkpoint_url else 1000
__magic_name__ : str = target_max_size / current_max_size
__magic_name__ : Optional[Any] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = F.to_tensor(_A )
__magic_name__ : str = F.normalize(_A, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
return image
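# The mean/std above are the standard ImageNet statistics; combined with the
# max-side resize above (800 for detection, 1000 for structure recognition)
# this appears to mirror DETR-style evaluation-time preprocessing.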
@torch.no_grad()
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
logger.info("""Converting model...""" )
# load original state dict
__magic_name__ : int = torch.hub.load_state_dict_from_url(_A, map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(_A, _A, _A )
__magic_name__ : List[str] = rename_backbone_keys(_A )
# query, key and value matrices need special treatment
read_in_q_k_v(_A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__magic_name__ : Union[str, Any] = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
__magic_name__ : int = state_dict.pop(_A )
__magic_name__ : int = val
# create HuggingFace model and load state dict
__magic_name__ : Optional[Any] = TableTransformerConfig(
backbone="""resnet18""", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
if "detection" in checkpoint_url:
__magic_name__ : Optional[int] = 15
__magic_name__ : Tuple = 2
__magic_name__ : str = {0: """table""", 1: """table rotated"""}
__magic_name__ : int = idalabel
__magic_name__ : Tuple = {v: k for k, v in idalabel.items()}
else:
__magic_name__ : int = 125
__magic_name__ : List[str] = 6
__magic_name__ : str = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
__magic_name__ : str = idalabel
__magic_name__ : Optional[Any] = {v: k for k, v in idalabel.items()}
__magic_name__ : int = DetrImageProcessor(
format="""coco_detection""", max_size=800 if """detection""" in checkpoint_url else 1000 )
__magic_name__ : str = TableTransformerForObjectDetection(_A )
model.load_state_dict(_A )
model.eval()
# verify our conversion
__magic_name__ : Dict = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
__magic_name__ : Optional[Any] = hf_hub_download(repo_id="""nielsr/example-pdf""", repo_type="""dataset""", filename=_A )
__magic_name__ : int = Image.open(_A ).convert("""RGB""" )
__magic_name__ : Any = normalize(resize(_A, _A ) ).unsqueeze(0 )
__magic_name__ : Optional[int] = model(_A )
if "detection" in checkpoint_url:
__magic_name__ : Tuple = (1, 15, 3)
__magic_name__ : Tuple = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
__magic_name__ : List[Any] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
__magic_name__ : Any = (1, 125, 7)
__magic_name__ : List[Any] = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
__magic_name__ : Optional[int] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3], _A, atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3], _A, atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
__magic_name__ : Optional[Any] = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(_A )
image_processor.push_to_hub(_A )
if __name__ == "__main__":
__magic_name__: int = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__magic_name__: Tuple = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 |
import re
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : List[Any] = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(_A, _A ):
return match.string == phone
return False
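# The pattern accepts an optional "+91" (with an optional dash or space), an
# optional leading "0" or "91", then a 10-digit number starting with 7, 8 or
# 9; e.g. "+91 8827897895", "08827897895" and "9876543210" all match, while
# "1234567890" does not.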
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 342 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__magic_name__: str = logging.get_logger(__name__)
__magic_name__: int = "▁"
__magic_name__: List[str] = {"vocab_file": "sentencepiece.bpe.model"}
__magic_name__: List[str] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
__magic_name__: Tuple = {
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : str = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = ['''input_ids''', '''attention_mask''']
lowercase__ : List[int] = []
lowercase__ : List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
__magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__ : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__magic_name__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
__magic_name__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__magic_name__ : List[Any] = 1
__magic_name__ : Dict = len(self.sp_model )
__magic_name__ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
__magic_name__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__magic_name__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__magic_name__ : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__magic_name__ : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Any = self.lang_code_to_id[self._src_lang]
__magic_name__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Any:
__magic_name__ : List[Any] = self.__dict__.copy()
__magic_name__ : int = None
__magic_name__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : Any = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __magic_name__ ( self ) -> str:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
__magic_name__ : Optional[int] = [1] * len(self.prefix_tokens )
__magic_name__ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__magic_name__ : Dict = src_lang
__magic_name__ : List[Any] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.convert_tokens_to_ids(lowerCAmelCase__ )
__magic_name__ : Tuple = tgt_lang_id
return inputs
def __magic_name__ ( self ) -> int:
__magic_name__ : str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
        return self.sp_model.encode(lowerCAmelCase__ , out_type=str )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__magic_name__ : List[str] = self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , """ """ ).strip()
return out_string
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
__magic_name__ : List[str] = src_lang
__magic_name__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : str = [self.cur_lang_code]
__magic_name__ : List[Any] = [self.eos_token_id]
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__magic_name__ : List[str] = []
__magic_name__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__magic_name__ : Optional[int] = [self.cur_lang_code]
__magic_name__ : Union[str, Any] = [self.eos_token_id]
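# A hedged usage sketch (the class name is the un-obfuscated HF equivalent and
# the ids are illustrative; the checkpoint and language codes come from the
# constants above):
# tokenizer = NllbTokenizer.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# inputs = tokenizer("Hello world", return_tensors="pt")
# By default the source language code is prepended and eos appended; with
# legacy_behaviour=True the language code instead follows eos at the end.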
| 342 |
import doctest
from collections import deque
import numpy as np
class snake_case__ :
def __init__( self ) -> None:
__magic_name__ : Any = [2, 1, 2, -1]
__magic_name__ : Tuple = [1, 2, 3, 4]
def __magic_name__ ( self ) -> list[float]:
__magic_name__ : Optional[Any] = len(self.first_signal )
__magic_name__ : Dict = len(self.second_signal )
__magic_name__ : Tuple = max(lowerCAmelCase__ , lowerCAmelCase__ )
# create a zero matrix of max_length x max_length
__magic_name__ : Optional[int] = [[0] * max_length for i in range(lowerCAmelCase__ )]
        # fills the smaller signal with zeros to make both signals the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCAmelCase__ ):
__magic_name__ : List[str] = deque(self.second_signal )
rotated_signal.rotate(lowerCAmelCase__ )
for j, item in enumerate(lowerCAmelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__magic_name__ : List[Any] = np.matmul(np.transpose(lowerCAmelCase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCAmelCase__ , 2 ) for i in final_signal]
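# A compact standalone sketch of the same circular convolution (names are
# illustrative; both signals are assumed to already have equal length):
def circular_convolution_sketch(first, second):
    n = len(first)
    # Row i of the circulant matrix is `second` rotated right by i positions,
    # exactly what deque.rotate(i) builds above.
    matrix = [[second[(j - i) % n] for j in range(n)] for i in range(n)]
    result = np.matmul(np.transpose(matrix), np.transpose(first))
    return [round(float(x), 2) for x in result]

# Worked check: circular_convolution_sketch([2, 1, 2, -1], [1, 2, 3, 4])
# yields [10.0, 10.0, 6.0, 14.0], matching the class defaults above.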
if __name__ == "__main__":
doctest.testmod()
| 342 | 1 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Tuple = SwinConfig()
__magic_name__ : Any = swin_name.split("""_""" )
__magic_name__ : List[Any] = name_split[1]
__magic_name__ : Union[str, Any] = int(name_split[4] )
__magic_name__ : List[Any] = int(name_split[3][-1] )
if model_size == "tiny":
__magic_name__ : int = 96
__magic_name__ : Any = (2, 2, 6, 2)
__magic_name__ : Optional[int] = (3, 6, 12, 24)
elif model_size == "small":
__magic_name__ : List[str] = 96
__magic_name__ : List[Any] = (2, 2, 18, 2)
__magic_name__ : Tuple = (3, 6, 12, 24)
elif model_size == "base":
__magic_name__ : List[str] = 128
__magic_name__ : Optional[int] = (2, 2, 18, 2)
__magic_name__ : Union[str, Any] = (4, 8, 16, 32)
else:
__magic_name__ : Optional[Any] = 192
__magic_name__ : List[Any] = (2, 2, 18, 2)
__magic_name__ : Optional[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
__magic_name__ : List[str] = 21841
else:
__magic_name__ : Dict = 1000
__magic_name__ : Union[str, Any] = """huggingface/label-files"""
__magic_name__ : Optional[int] = """imagenet-1k-id2label.json"""
__magic_name__ : str = json.load(open(hf_hub_download(_A, _A, repo_type="""dataset""" ), """r""" ) )
__magic_name__ : List[str] = {int(_A ): v for k, v in idalabel.items()}
__magic_name__ : Union[str, Any] = idalabel
__magic_name__ : Any = {v: k for k, v in idalabel.items()}
__magic_name__ : int = img_size
__magic_name__ : int = num_classes
__magic_name__ : str = embed_dim
__magic_name__ : List[Any] = depths
__magic_name__ : List[Any] = num_heads
__magic_name__ : List[str] = window_size
return config
def UpperCamelCase ( _A ):
"""simple docstring"""
if "patch_embed.proj" in name:
__magic_name__ : str = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__magic_name__ : Optional[Any] = name.replace("""patch_embed.norm""", """embeddings.norm""" )
if "layers" in name:
__magic_name__ : Any = """encoder.""" + name
if "attn.proj" in name:
__magic_name__ : str = name.replace("""attn.proj""", """attention.output.dense""" )
if "attn" in name:
__magic_name__ : str = name.replace("""attn""", """attention.self""" )
if "norm1" in name:
__magic_name__ : Optional[int] = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
__magic_name__ : Tuple = name.replace("""norm2""", """layernorm_after""" )
if "mlp.fc1" in name:
__magic_name__ : str = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
__magic_name__ : List[str] = name.replace("""mlp.fc2""", """output.dense""" )
if name == "norm.weight":
__magic_name__ : List[Any] = """layernorm.weight"""
if name == "norm.bias":
__magic_name__ : Any = """layernorm.bias"""
if "head" in name:
__magic_name__ : Any = name.replace("""head""", """classifier""" )
else:
__magic_name__ : List[str] = """swin.""" + name
return name
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ : Dict = orig_state_dict.pop(_A )
if "mask" in key:
continue
elif "qkv" in key:
__magic_name__ : int = key.split(""".""" )
__magic_name__ : Dict = int(key_split[1] )
__magic_name__ : Tuple = int(key_split[3] )
__magic_name__ : Dict = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__magic_name__ : Dict = val[:dim, :]
__magic_name__ : Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ : Tuple = val[-dim:, :]
else:
__magic_name__ : List[Any] = val[
:dim
]
__magic_name__ : Optional[Any] = val[
dim : dim * 2
]
__magic_name__ : Union[str, Any] = val[
-dim:
]
else:
__magic_name__ : Any = val
return orig_state_dict
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : str = timm.create_model(_A, pretrained=_A )
timm_model.eval()
__magic_name__ : Optional[Any] = get_swin_config(_A )
__magic_name__ : Dict = SwinForImageClassification(_A )
model.eval()
__magic_name__ : int = convert_state_dict(timm_model.state_dict(), _A )
model.load_state_dict(_A )
__magic_name__ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__magic_name__ : str = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""", """-""" ) ) )
__magic_name__ : List[Any] = Image.open(requests.get(_A, stream=_A ).raw )
__magic_name__ : Dict = image_processor(images=_A, return_tensors="""pt""" )
__magic_name__ : Optional[int] = timm_model(inputs["""pixel_values"""] )
__magic_name__ : Optional[Any] = model(**_A ).logits
assert torch.allclose(_A, _A, atol=1e-3 )
print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__magic_name__: Union[str, Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
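# A minimal sketch of the fused-qkv split performed in convert_state_dict
# above (the dimension 96 is illustrative, taken from the "tiny" embed_dim):
# timm stores query/key/value as one (3 * dim, dim) weight that is sliced
# into three (dim, dim) tensors.
# qkv_weight = torch.randn(3 * 96, 96)
# q, k, v = qkv_weight[:96, :], qkv_weight[96:192, :], qkv_weight[-96:, :]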
| 342 |
from math import factorial
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(_A, _A ) or not isinstance(_A, _A ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
__magic_name__ : int = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__magic_name__ : Any = float(factorial(_A ) )
coefficient /= factorial(_A ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
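    # Worked check: C(4, 2) = 6 and 0.75**2 * 0.25**2 = 0.03515625, so the
    # printed probability is 6 * 0.03515625 = 0.2109375.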
| 342 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__magic_name__: str = logging.get_logger(__name__)
# General docstring
__magic_name__: Dict = "MobileNetV1Config"
# Base docstring
__magic_name__: Union[str, Any] = "google/mobilenet_v1_1.0_224"
__magic_name__: List[str] = [1, 1_024, 7, 7]
# Image classification docstring
__magic_name__: List[Any] = "google/mobilenet_v1_1.0_224"
__magic_name__: Any = "tabby, tabby cat"
__magic_name__: Any = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def UpperCamelCase ( _A, _A, _A=None ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = {}
if isinstance(_A, _A ):
__magic_name__ : Optional[Any] = model.mobilenet_va
else:
__magic_name__ : List[str] = model
__magic_name__ : Union[str, Any] = """MobilenetV1/Conv2d_0/"""
__magic_name__ : Tuple = backbone.conv_stem.convolution.weight
__magic_name__ : Optional[Any] = backbone.conv_stem.normalization.bias
__magic_name__ : Any = backbone.conv_stem.normalization.weight
__magic_name__ : Union[str, Any] = backbone.conv_stem.normalization.running_mean
__magic_name__ : str = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__magic_name__ : Union[str, Any] = i + 1
__magic_name__ : Any = i * 2
__magic_name__ : Any = backbone.layer[pt_index]
__magic_name__ : Tuple = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
__magic_name__ : Union[str, Any] = pointer.convolution.weight
__magic_name__ : List[Any] = pointer.normalization.bias
__magic_name__ : Optional[Any] = pointer.normalization.weight
__magic_name__ : Union[str, Any] = pointer.normalization.running_mean
__magic_name__ : str = pointer.normalization.running_var
__magic_name__ : Optional[int] = backbone.layer[pt_index + 1]
__magic_name__ : Tuple = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
__magic_name__ : List[Any] = pointer.convolution.weight
__magic_name__ : int = pointer.normalization.bias
__magic_name__ : List[str] = pointer.normalization.weight
__magic_name__ : Union[str, Any] = pointer.normalization.running_mean
__magic_name__ : Tuple = pointer.normalization.running_var
if isinstance(_A, _A ):
__magic_name__ : str = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
__magic_name__ : Optional[int] = model.classifier.weight
__magic_name__ : List[Any] = model.classifier.bias
return tf_to_pt_map
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
__magic_name__ : Dict = tf.train.list_variables(_A )
__magic_name__ : Dict = {}
for name, shape in init_vars:
logger.info(f'Loading TF weight {name} with shape {shape}' )
__magic_name__ : List[str] = tf.train.load_variable(_A, _A )
__magic_name__ : str = array
# Build TF to PyTorch weights loading map
__magic_name__ : Optional[Any] = _build_tf_to_pytorch_map(_A, _A, _A )
for name, pointer in tf_to_pt_map.items():
logger.info(f'Importing {name}' )
if name not in tf_weights:
logger.info(f'{name} not in tf pre-trained weights, skipping' )
continue
__magic_name__ : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
__magic_name__ : Optional[int] = np.transpose(_A, (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
__magic_name__ : List[str] = array.squeeze().transpose()
else:
__magic_name__ : Tuple = np.transpose(_A, (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(f'Initialize PyTorch weight {name} {array.shape}' )
__magic_name__ : Tuple = torch.from_numpy(_A )
tf_weights.pop(_A, _A )
tf_weights.pop(name + """/RMSProp""", _A )
tf_weights.pop(name + """/RMSProp_1""", _A )
tf_weights.pop(name + """/ExponentialMovingAverage""", _A )
logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ ,__magic_name__ : Tuple = features.shape[-2:]
__magic_name__ ,__magic_name__ : List[Any] = conv_layer.stride
__magic_name__ ,__magic_name__ : Union[str, Any] = conv_layer.kernel_size
if in_height % stride_height == 0:
__magic_name__ : Tuple = max(kernel_height - stride_height, 0 )
else:
__magic_name__ : Any = max(kernel_height - (in_height % stride_height), 0 )
if in_width % stride_width == 0:
__magic_name__ : Optional[int] = max(kernel_width - stride_width, 0 )
else:
__magic_name__ : str = max(kernel_width - (in_width % stride_width), 0 )
__magic_name__ : List[Any] = pad_along_width // 2
__magic_name__ : Dict = pad_along_width - pad_left
__magic_name__ : int = pad_along_height // 2
__magic_name__ : List[Any] = pad_along_height - pad_top
__magic_name__ : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_A, _A, """constant""", 0.0 )
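# The function above mirrors TensorFlow's "SAME" padding: the total padding
# per dimension is max(kernel - stride, 0) when the input divides evenly
# (otherwise max(kernel - remainder, 0)), split so any extra pixel lands on
# the bottom/right edge.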
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ) -> None:
super().__init__()
__magic_name__ : List[Any] = config
if in_channels % groups != 0:
raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' )
__magic_name__ : Any = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__magic_name__ : List[str] = nn.Convad(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="""zeros""" , )
if use_normalization:
__magic_name__ : Optional[Any] = nn.BatchNormad(
num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , )
else:
__magic_name__ : Union[str, Any] = None
if use_activation:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCAmelCase__ ):
__magic_name__ : Dict = ACTaFN[config.hidden_act]
else:
__magic_name__ : List[Any] = config.hidden_act
else:
__magic_name__ : Optional[int] = None
def __magic_name__ ( self , lowerCAmelCase__ ) -> torch.Tensor:
if self.config.tf_padding:
__magic_name__ : str = apply_tf_padding(lowerCAmelCase__ , self.convolution )
__magic_name__ : Union[str, Any] = self.convolution(lowerCAmelCase__ )
if self.normalization is not None:
__magic_name__ : Any = self.normalization(lowerCAmelCase__ )
if self.activation is not None:
__magic_name__ : Optional[Any] = self.activation(lowerCAmelCase__ )
return features
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Tuple = MobileNetVaConfig
lowercase__ : List[Any] = load_tf_weights_in_mobilenet_va
lowercase__ : Union[str, Any] = '''mobilenet_v1'''
lowercase__ : Dict = '''pixel_values'''
lowercase__ : Dict = False
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__magic_name__: List[Any] = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__magic_name__: Tuple = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , _lowerCAmelCase , )
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True ) -> Optional[Any]:
super().__init__(lowerCAmelCase__ )
__magic_name__ : List[Any] = config
__magic_name__ : Dict = 32
__magic_name__ : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
__magic_name__ : Optional[Any] = MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , )
__magic_name__ : Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__magic_name__ : Union[str, Any] = nn.ModuleList()
for i in range(13 ):
__magic_name__ : List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__magic_name__ : Any = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ) )
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ) )
__magic_name__ : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __magic_name__ ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
__magic_name__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__ : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
__magic_name__ : List[Any] = self.conv_stem(lowerCAmelCase__ )
__magic_name__ : int = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__magic_name__ : Optional[Any] = layer_module(lowerCAmelCase__ )
if output_hidden_states:
__magic_name__ : List[Any] = all_hidden_states + (hidden_states,)
__magic_name__ : Tuple = hidden_states
if self.pooler is not None:
__magic_name__ : Tuple = torch.flatten(self.pooler(lowerCAmelCase__ ) , start_dim=1 )
else:
__magic_name__ : Dict = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCAmelCase , )
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ ) -> None:
super().__init__(lowerCAmelCase__ )
__magic_name__ : Dict = config.num_labels
__magic_name__ : Optional[Any] = MobileNetVaModel(lowerCAmelCase__ )
__magic_name__ : List[Any] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__magic_name__ : Optional[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__ )
__magic_name__ : int = nn.Linear(lowerCAmelCase__ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __magic_name__ ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
__magic_name__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__ : List[str] = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
__magic_name__ : Dict = outputs.pooler_output if return_dict else outputs[1]
__magic_name__ : str = self.classifier(self.dropout(lowerCAmelCase__ ) )
__magic_name__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__magic_name__ : int = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__magic_name__ : Optional[int] = """single_label_classification"""
else:
__magic_name__ : Dict = """multi_label_classification"""
if self.config.problem_type == "regression":
__magic_name__ : Optional[Any] = MSELoss()
if self.num_labels == 1:
__magic_name__ : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__magic_name__ : Optional[Any] = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
__magic_name__ : Optional[int] = CrossEntropyLoss()
__magic_name__ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__magic_name__ : Optional[Any] = BCEWithLogitsLoss()
__magic_name__ : Any = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
__magic_name__ : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
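# A hedged usage sketch (class names are the un-obfuscated HF equivalents and
# are assumptions here; the checkpoint is the one named in the docstrings):
# from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
# processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
# model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
# logits = model(**processor(images=image, return_tensors="pt")).logits
# predicted_class = logits.argmax(-1).item()  # e.g. "tabby, tabby cat"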
| 342 |
from __future__ import annotations
def UpperCamelCase ( _A ): # This function is recursive
"""simple docstring"""
__magic_name__ : str = len(_A )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__magic_name__ : Dict = array[0]
__magic_name__ : Optional[Any] = False
__magic_name__ : Tuple = 1
__magic_name__ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
__magic_name__ : Union[str, Any] = True
__magic_name__ : List[Any] = [element for element in array[i:] if element >= array[i]]
__magic_name__ : Dict = longest_subsequence(_A )
if len(_A ) > len(_A ):
__magic_name__ : Tuple = temp_array
else:
i += 1
__magic_name__ : Any = [element for element in array[1:] if element >= pivot]
__magic_name__ : Dict = [pivot, *longest_subsequence(_A )]
if len(_A ) > len(_A ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
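    # Worked check: longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    # returns [10, 22, 33, 41, 60, 80], the longest non-decreasing subsequence.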
| 342 | 1 |
from string import ascii_lowercase, ascii_uppercase
def UpperCamelCase ( _A ):
"""simple docstring"""
if not sentence:
return ""
__magic_name__ : int = dict(zip(_A, _A ) )
return lower_to_upper.get(sentence[0], sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 342 |
import argparse
import os
import re
__magic_name__: Optional[Any] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__magic_name__: Any = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
__magic_name__: Tuple = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def UpperCamelCase ( _A, _A = False ):
"""simple docstring"""
with open(_A, """r""", encoding="""utf-8""" ) as f:
__magic_name__ : Any = f.read()
__magic_name__ : List[Any] = content.split("""\n""" )
__magic_name__ : List[str] = []
__magic_name__ : Union[str, Any] = 0
while line_idx < len(_A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__magic_name__ : Any = len(re.search(R"""^(\s*)\S""", lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
__magic_name__ : List[Any] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__magic_name__ : List[str] = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__magic_name__ : Union[str, Any] = sorted(_A, key=lambda _A : _re_identifier.search(_A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write("""\n""".join(_A ) )
elif "\n".join(_A ) != content:
return True
def UpperCamelCase ( _A = False ):
"""simple docstring"""
__magic_name__ : Any = [os.path.join(_A, _A ) for f in os.listdir(_A ) if f.endswith(""".py""" )]
__magic_name__ : List[str] = [sort_auto_mapping(_A, overwrite=_A ) for fname in fnames]
if not overwrite and any(_A ):
__magic_name__ : Optional[Any] = [f for f, d in zip(_A, _A ) if d]
raise ValueError(
f'The following files have auto mappings that need sorting: {", ".join(_A )}. Run `make style` to fix'
""" this.""" )
if __name__ == "__main__":
__magic_name__: List[str] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
__magic_name__: List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
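# A hedged usage sketch (the script path is an assumption):
# python utils/sort_auto_mappings.py --check_only   # fail if any mapping is unsorted
# python utils/sort_auto_mappings.py                # rewrite the mappings in place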
| 342 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Tuple = AlbertConfig.from_json_file(_A )
print(f'Building PyTorch model from configuration: {config}' )
__magic_name__ : Union[str, Any] = AlbertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_A, _A, _A )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict(), _A )
if __name__ == "__main__":
__magic_name__: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
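# A hedged usage sketch (all paths are illustrative):
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin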
| 342 |
__magic_name__: str = [0, 2, 4, 6, 8]
__magic_name__: Optional[int] = [1, 3, 5, 7, 9]
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__magic_name__ : List[Any] = 0
for digit in range(10 ):
__magic_name__ : Optional[int] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, _A, _A )
return result
__magic_name__ : str = 0
for digita in range(10 ):
__magic_name__ : Optional[Any] = digita
if (remainder + digita) % 2 == 0:
__magic_name__ : Tuple = ODD_DIGITS
else:
__magic_name__ : str = EVEN_DIGITS
for digita in other_parity_digits:
__magic_name__ : Tuple = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, _A, _A, )
return result
def UpperCamelCase ( _A = 9 ):
"""simple docstring"""
__magic_name__ : List[str] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(_A, 0, [0] * length, _A )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 342 | 1 |
from __future__ import annotations
from random import choice
def UpperCamelCase ( _A ):
"""simple docstring"""
return choice(_A )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : int = random_pivot(_A )
# partition based on pivot
# linear time
__magic_name__ : List[str] = [e for e in lst if e < pivot]
__magic_name__ : str = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_A ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_A ) < k - 1:
return kth_number(_A, k - len(_A ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_A, _A )
if __name__ == "__main__":
import doctest
doctest.testmod()
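    # Worked check (assumes distinct elements, since values equal to the pivot
    # are discarded): kth_number([2, 1, 3, 4, 5], 3) returns 3, the third
    # smallest element, in expected linear time thanks to the random pivot.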
| 342 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
__magic_name__ : int = sorted(string.lower() )
return len(_A ) == len(set(_A ) )
if __name__ == "__main__":
__magic_name__: Dict = input("Enter a string ").strip()
__magic_name__: Union[str, Any] = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 342 | 1 |
def UpperCamelCase ( _A ):
"""simple docstring"""
if not isinstance(_A, _A ):
__magic_name__ : Any = f'Input value of [number={number}] must be an integer'
raise TypeError(_A )
if number < 0:
return False
__magic_name__ : Any = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
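    # Worked checks (the function detects automorphic numbers, whose square
    # ends in the number itself): 5 -> 25, 6 -> 36 and 76 -> 5776 return True,
    # while 7 -> 49 returns False.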
| 342 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 342 | 1 |
import os
def UpperCamelCase ( ):
"""simple docstring"""
with open(os.path.dirname(_A ) + """/p022_names.txt""" ) as file:
__magic_name__ : Optional[int] = str(file.readlines()[0] )
__magic_name__ : Optional[Any] = names.replace("""\"""", """""" ).split(""",""" )
names.sort()
__magic_name__ : Optional[Any] = 0
__magic_name__ : Optional[Any] = 0
for i, name in enumerate(_A ):
for letter in name:
name_score += ord(_A ) - 64
total_score += (i + 1) * name_score
__magic_name__ : Union[str, Any] = 0
return total_score
if __name__ == "__main__":
print(solution())
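    # Worked example from the Project Euler 22 statement: COLIN is worth
    # 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name, scoring 938 * 53 = 49714.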
| 342 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 32 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=3 , ) -> Union[str, Any]:
__magic_name__ : str = parent
__magic_name__ : Dict = do_resize
__magic_name__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_88}
__magic_name__ : Union[str, Any] = size_divisor
__magic_name__ : Union[str, Any] = do_rescale
__magic_name__ : Dict = rescale_factor
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : List[str] = do_center_crop
__magic_name__ : Tuple = image_mean
__magic_name__ : Tuple = image_std
__magic_name__ : Tuple = do_pad
__magic_name__ : int = batch_size
__magic_name__ : List[Any] = num_channels
__magic_name__ : int = min_resolution
__magic_name__ : str = max_resolution
def __magic_name__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
if not batched:
__magic_name__ : Dict = self.size["""shortest_edge"""]
__magic_name__ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__magic_name__ ,__magic_name__ : List[Any] = image.size
else:
__magic_name__ ,__magic_name__ : Dict = image.shape[1], image.shape[2]
__magic_name__ : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
__magic_name__ ,__magic_name__ : str = size, scale * w
else:
__magic_name__ ,__magic_name__ : Optional[Any] = scale * h, size
__magic_name__ : Tuple = int((13_33 / 8_00) * size )
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
__magic_name__ : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = newh * scale
__magic_name__ : Any = neww * scale
__magic_name__ ,__magic_name__ : str = int(newh + 0.5 ), int(neww + 0.5 )
__magic_name__ ,__magic_name__ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__magic_name__ : Union[str, Any] = []
for image in image_inputs:
__magic_name__ ,__magic_name__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
__magic_name__ : Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : int = BridgeTowerImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = BridgeTowerImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Any:
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )
def __magic_name__ ( self ) -> Optional[int]:
pass
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> str:
# Initialize image processor
__magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
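    # The expected sizes above mirror the processor's resize rule: scale the
    # short side to `shortest_edge`, cap the long side at 1333/800 * size,
    # then round both sides down to a multiple of `size_divisor`.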
| 342 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__: Optional[Any] = logging.get_logger(__name__)
__magic_name__: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : str = '''data2vec-vision'''
def __init__( self , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-1_2 , lowerCAmelCase__=2_24 , lowerCAmelCase__=16 , lowerCAmelCase__=3 , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=True , lowerCAmelCase__=[3, 5, 7, 11] , lowerCAmelCase__=[1, 2, 3, 6] , lowerCAmelCase__=True , lowerCAmelCase__=0.4 , lowerCAmelCase__=2_56 , lowerCAmelCase__=1 , lowerCAmelCase__=False , lowerCAmelCase__=2_55 , **lowerCAmelCase__ , ) -> int:
super().__init__(**lowerCAmelCase__ )
__magic_name__ : Dict = hidden_size
__magic_name__ : str = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : List[str] = intermediate_size
__magic_name__ : Tuple = hidden_act
__magic_name__ : int = hidden_dropout_prob
__magic_name__ : Tuple = attention_probs_dropout_prob
__magic_name__ : Dict = initializer_range
__magic_name__ : str = layer_norm_eps
__magic_name__ : Any = image_size
__magic_name__ : Optional[int] = patch_size
__magic_name__ : Optional[Any] = num_channels
__magic_name__ : Any = use_mask_token
__magic_name__ : Optional[Any] = use_absolute_position_embeddings
__magic_name__ : Optional[Any] = use_relative_position_bias
__magic_name__ : List[Any] = use_shared_relative_position_bias
__magic_name__ : Dict = layer_scale_init_value
__magic_name__ : List[Any] = drop_path_rate
__magic_name__ : Optional[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
__magic_name__ : List[str] = out_indices
__magic_name__ : int = pool_scales
# auxiliary head attributes (semantic segmentation)
__magic_name__ : str = use_auxiliary_head
__magic_name__ : List[Any] = auxiliary_loss_weight
__magic_name__ : Union[str, Any] = auxiliary_channels
__magic_name__ : Optional[Any] = auxiliary_num_convs
__magic_name__ : Union[str, Any] = auxiliary_concat_input
__magic_name__ : Optional[Any] = semantic_loss_ignore_index
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[Any] = version.parse('''1.11''' )
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ ( self ) -> float:
return 1e-4
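# A short note on the ONNX hooks above: the first property declares the dynamic
# axes of `pixel_values` for export, and the 1e-4 value corresponds to the
# absolute tolerance used when validating the exported graph against PyTorch.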
| 342 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__: Tuple = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Union[str, Any] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__magic_name__: Optional[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__magic_name__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
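# The _LazyModule pattern above defers heavy imports: submodules listed in
# _import_structure are only imported when an attribute is first accessed,
# keeping the package import cheap when torch is unavailable.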
| 342 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__magic_name__: Optional[int] = logging.get_logger(__name__)
# General docstring
__magic_name__: Optional[Any] = "PoolFormerConfig"
# Base docstring
__magic_name__: Optional[int] = "sail/poolformer_s12"
__magic_name__: str = [1, 512, 7, 7]
# Image classification docstring
__magic_name__: List[Any] = "sail/poolformer_s12"
__magic_name__: Union[str, Any] = "tabby, tabby cat"
__magic_name__: str = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCamelCase ( _A, _A = 0.0, _A = False ):
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
__magic_name__ : Optional[int] = 1 - drop_prob
__magic_name__ : List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__magic_name__ : List[Any] = keep_prob + torch.rand(_A, dtype=input.dtype, device=input.device )
random_tensor.floor_() # binarize
__magic_name__ : Tuple = input.div(_A ) * random_tensor
return output
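# Stochastic depth in brief: with drop_prob = 0.2 each sample keeps its
# residual branch with probability 0.8 and is rescaled by 1 / 0.8, so the
# expected activation magnitude is unchanged at training time.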
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ = None ) -> None:
super().__init__()
__magic_name__ : List[Any] = drop_prob
def __magic_name__ ( self , lowerCAmelCase__ ) -> torch.Tensor:
return drop_path(lowerCAmelCase__ , self.drop_prob , self.training )
def __magic_name__ ( self ) -> str:
return "p={}".format(self.drop_prob )
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Dict:
super().__init__()
__magic_name__ : str = patch_size if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (patch_size, patch_size)
__magic_name__ : List[str] = stride if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (stride, stride)
__magic_name__ : Tuple = padding if isinstance(lowerCAmelCase__ , collections.abc.Iterable ) else (padding, padding)
__magic_name__ : int = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ )
__magic_name__ : int = norm_layer(lowerCAmelCase__ ) if norm_layer else nn.Identity()
def __magic_name__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : List[str] = self.projection(lowerCAmelCase__ )
__magic_name__ : List[str] = self.norm(lowerCAmelCase__ )
return embeddings
class snake_case__ ( nn.GroupNorm ):
def __init__( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
super().__init__(1 , lowerCAmelCase__ , **lowerCAmelCase__ )
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> List[str]:
super().__init__()
__magic_name__ : List[Any] = nn.AvgPoolad(lowerCAmelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
return self.pool(lowerCAmelCase__ ) - hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
super().__init__()
__magic_name__ : Optional[Any] = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
__magic_name__ : Any = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
__magic_name__ : Union[str, Any] = PoolFormerDropPath(lowerCAmelCase__ )
if isinstance(config.hidden_act , lowerCAmelCase__ ):
__magic_name__ : str = ACTaFN[config.hidden_act]
else:
__magic_name__ : Tuple = config.hidden_act
def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
__magic_name__ : Optional[Any] = self.conva(lowerCAmelCase__ )
__magic_name__ : Tuple = self.act_fn(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.drop(lowerCAmelCase__ )
__magic_name__ : Dict = self.conva(lowerCAmelCase__ )
__magic_name__ : int = self.drop(lowerCAmelCase__ )
return hidden_states
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
super().__init__()
__magic_name__ : Optional[Any] = PoolFormerPooling(lowerCAmelCase__ )
__magic_name__ : Any = PoolFormerOutput(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[int] = PoolFormerGroupNorm(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = PoolFormerGroupNorm(lowerCAmelCase__ )
# Useful for training neural nets
__magic_name__ : Dict = PoolFormerDropPath(lowerCAmelCase__ ) if drop_path > 0.0 else nn.Identity()
__magic_name__ : List[Any] = config.use_layer_scale
if config.use_layer_scale:
__magic_name__ : str = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowerCAmelCase__) ) , requires_grad=lowerCAmelCase__ )
__magic_name__ : Dict = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowerCAmelCase__) ) , requires_grad=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
if self.use_layer_scale:
__magic_name__ : Optional[Any] = self.pooling(self.before_norm(lowerCAmelCase__ ) )
__magic_name__ : Optional[int] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
__magic_name__ : List[Any] = hidden_states + self.drop_path(lowerCAmelCase__ )
__magic_name__ : Any = ()
__magic_name__ : Optional[int] = self.output(self.after_norm(lowerCAmelCase__ ) )
__magic_name__ : Tuple = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
__magic_name__ : Dict = hidden_states + self.drop_path(lowerCAmelCase__ )
__magic_name__ : str = (output,) + outputs
return outputs
else:
__magic_name__ : List[Any] = self.drop_path(self.pooling(self.before_norm(lowerCAmelCase__ ) ) )
# First residual connection
__magic_name__ : List[str] = pooling_output + hidden_states
__magic_name__ : str = ()
# Second residual connection inside the PoolFormerOutput block
__magic_name__ : Tuple = self.drop_path(self.output(self.after_norm(lowerCAmelCase__ ) ) )
__magic_name__ : Tuple = hidden_states + layer_output
__magic_name__ : Dict = (output,) + outputs
return outputs
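# A sketch (assuming plain PyTorch; names are illustrative) of the layer-scale
# residual used above: a learnable per-channel scalar, initialised to a small
# value, multiplies each sub-block output before it rejoins the residual
# stream, damping the sub-block's contribution early in training.
def _layer_scale_residual_sketch(hidden_states, block_output, init_value=1e-5):
    num_channels = hidden_states.shape[1]
    scale = nn.Parameter(init_value * torch.ones(num_channels))
    # broadcast the (C,) scale over a (B, C, H, W) block output
    return hidden_states + scale.unsqueeze(-1).unsqueeze(-1) * block_output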
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> Dict:
super().__init__()
__magic_name__ : int = config
# stochastic depth decay rule
__magic_name__ : Optional[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
__magic_name__ : str = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
__magic_name__ : Dict = nn.ModuleList(lowerCAmelCase__ )
# Transformer blocks
__magic_name__ : Union[str, Any] = []
__magic_name__ : Any = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
__magic_name__ : List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
lowerCAmelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(lowerCAmelCase__ ) )
__magic_name__ : str = nn.ModuleList(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=True ) -> int:
__magic_name__ : Optional[Any] = () if output_hidden_states else None
__magic_name__ : Optional[Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
__magic_name__ ,__magic_name__ : Tuple = layers
# Get patch embeddings from hidden_states
__magic_name__ : Tuple = embedding_layer(lowerCAmelCase__ )
# Send the embeddings through the blocks
for _, blk in enumerate(lowerCAmelCase__ ):
__magic_name__ : List[str] = blk(lowerCAmelCase__ )
__magic_name__ : List[str] = layer_outputs[0]
if output_hidden_states:
__magic_name__ : Union[str, Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ )
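# A standalone sketch of the stochastic depth decay rule used in the encoder
# above: drop-path rates grow linearly from 0 to drop_path_rate over all layers
# of all stages, and the flat list is then sliced per stage. The numbers below
# are made up for illustration.
def _drop_path_schedule_sketch(drop_path_rate=0.1, depths=(2, 2, 6, 2)):
    rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
    schedule, cur = [], 0
    for depth in depths:
        schedule.append(rates[cur : cur + depth])  # rates for this stage's layers
        cur += depth
    return schedule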
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : List[str] = PoolFormerConfig
lowercase__ : Union[str, Any] = '''poolformer'''
lowercase__ : Union[str, Any] = '''pixel_values'''
lowercase__ : Optional[Any] = True
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[Any]:
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Union[str, Any]:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = value
__magic_name__: Tuple = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__magic_name__: List[Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , _lowerCAmelCase , )
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase__ )
__magic_name__ : List[Any] = config
__magic_name__ : Optional[Any] = PoolFormerEncoder(lowerCAmelCase__ )
# Initialize weights and apply final processing
self.post_init()
def __magic_name__ ( self ) -> Tuple:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __magic_name__ ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
__magic_name__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__magic_name__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
__magic_name__ : Tuple = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )
__magic_name__ : Union[str, Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
class snake_case__ ( nn.Module ):
def __init__( self , lowerCAmelCase__ ) -> Optional[Any]:
super().__init__()
__magic_name__ : Optional[Any] = nn.Linear(config.hidden_size , config.hidden_size )
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.dense(lowerCAmelCase__ )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , _lowerCAmelCase , )
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ ) -> Any:
super().__init__(lowerCAmelCase__ )
__magic_name__ : Any = config.num_labels
__magic_name__ : str = PoolFormerModel(lowerCAmelCase__ )
# Final norm
__magic_name__ : Union[str, Any] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
__magic_name__ : Optional[int] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __magic_name__ ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
__magic_name__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__magic_name__ : List[str] = self.poolformer(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )
__magic_name__ : List[str] = outputs[0]
__magic_name__ : Optional[int] = self.classifier(self.norm(lowerCAmelCase__ ).mean([-2, -1] ) )
__magic_name__ : Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__magic_name__ : int = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__magic_name__ : Tuple = """single_label_classification"""
else:
__magic_name__ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
__magic_name__ : Optional[int] = MSELoss()
if self.num_labels == 1:
__magic_name__ : int = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__magic_name__ : Dict = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
__magic_name__ : Optional[int] = CrossEntropyLoss()
__magic_name__ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__magic_name__ : Tuple = BCEWithLogitsLoss()
__magic_name__ : Dict = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
__magic_name__ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
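# A condensed sketch (illustrative names) of how the classification head above
# infers `problem_type` when the config leaves it unset: a single label column
# means regression (MSELoss), integer labels mean single-label classification
# (CrossEntropyLoss), and anything else means multi-label (BCEWithLogitsLoss).
def _infer_problem_type_sketch(num_labels, labels):
    if num_labels == 1:
        return "regression"
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"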
| 342 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__: Optional[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__magic_name__: List[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = (
list(range(ord("""!""" ), ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ), ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ), ord("""ÿ""" ) + 1 ) )
)
__magic_name__ : Any = bs[:]
__magic_name__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_A )
cs.append(2**8 + n )
n += 1
__magic_name__ : List[str] = [chr(_A ) for n in cs]
return dict(zip(_A, _A ) )
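# The byte-to-unicode table built above is a bijection over all 256 byte
# values: printable bytes map to themselves and the rest are shifted to unused
# code points, so arbitrary byte sequences become strings of visible characters
# the BPE can operate on. A quick standalone check (illustrative, pure Python):
def _byte_encoder_sketch():
    table = bytes_to_unicode()
    assert len(table) == 256 and len(set(table.values())) == 256  # bijective
    return table[ord(" ")]  # the space byte is remapped (to "Ġ" in this scheme)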
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : str = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : List[Any] = char
return pairs
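# `get_pairs` above collects the set of adjacent symbol pairs in a word, which
# is exactly what the BPE loop below repeatedly ranks and merges. A tiny
# illustrative check:
def _get_pairs_sketch():
    # ("l", "o", "w") -> {("l", "o"), ("o", "w")}
    return get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}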
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
__magic_name__ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__magic_name__ : str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__magic_name__ : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__magic_name__ : Union[str, Any] = json.load(lowerCAmelCase__ )
__magic_name__ : Any = {v: k for k, v in self.encoder.items()}
__magic_name__ : Tuple = errors # how to handle errors in decoding
__magic_name__ : Tuple = bytes_to_unicode()
__magic_name__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__magic_name__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
__magic_name__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : int = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__magic_name__ : str = {}
__magic_name__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __magic_name__ ( self ) -> Optional[Any]:
return len(self.encoder )
def __magic_name__ ( self ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self , lowerCAmelCase__ ) -> str:
if token in self.cache:
return self.cache[token]
__magic_name__ : Union[str, Any] = tuple(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__magic_name__ : Union[str, Any] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ ,__magic_name__ : List[str] = bigram
__magic_name__ : Any = []
__magic_name__ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
__magic_name__ : str = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Optional[Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(lowerCAmelCase__ )
__magic_name__ : Optional[int] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__magic_name__ : List[str] = get_pairs(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = """ """.join(lowerCAmelCase__ )
__magic_name__ : str = word
return word
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : str = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__magic_name__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.decoder.get(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = """""".join(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Tuple = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__magic_name__ : Optional[Any] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
__magic_name__ : Optional[int] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
__magic_name__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : Dict = [self.sep_token_id]
__magic_name__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__magic_name__ : List[Any] = """ """ + text
return (text, kwargs)
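# A compact standalone sketch of the greedy BPE merge loop implemented in the
# tokenizer above: repeatedly pick the adjacent pair with the lowest merge rank
# and fuse it, until no ranked pair remains. The ranks passed in here are made
# up for illustration; real ranks come from the merges.txt file loaded above.
def _bpe_merge_sketch(word, ranks):
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if best not in ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)  # fuse the best-ranked pair
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word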
| 342 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__magic_name__: int = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__magic_name__: List[str] = "main"
# Default branch name
__magic_name__: str = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__magic_name__: List[Any] = "aaaaaaa"
# This commit does not exist, so we should 404.
__magic_name__: Tuple = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__magic_name__: Any = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def UpperCamelCase ( ):
"""simple docstring"""
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def UpperCamelCase ( ):
"""simple docstring"""
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Union[str, Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class snake_case__ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[str]:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Tuple:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def __magic_name__ ( self ) -> str:
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""labels"""] )
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""start_positions""", """end_positions"""] )
class snake_case__ ( _lowerCAmelCase ):
pass
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""labels"""] )
@require_tf
def __magic_name__ ( self ) -> Optional[Any]:
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""labels"""] )
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""start_positions""", """end_positions"""] )
class snake_case__ ( _lowerCAmelCase ):
pass
self.assertEqual(find_labels(lowerCAmelCase__ ) , ["""labels"""] )
@require_flax
def __magic_name__ ( self ) -> int:
# Flax models don't have labels
self.assertEqual(find_labels(lowerCAmelCase__ ) , [] )
self.assertEqual(find_labels(lowerCAmelCase__ ) , [] )
self.assertEqual(find_labels(lowerCAmelCase__ ) , [] )
class snake_case__ ( _lowerCAmelCase ):
pass
self.assertEqual(find_labels(lowerCAmelCase__ ) , [] )
| 342 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[32, 64, 1_28] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ) -> str:
__magic_name__ : Optional[int] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Optional[int] = patch_size
__magic_name__ : Union[str, Any] = num_channels
__magic_name__ : str = embed_dim
__magic_name__ : int = hidden_sizes
__magic_name__ : Union[str, Any] = depths
__magic_name__ : List[str] = num_heads
__magic_name__ : str = window_size
__magic_name__ : Optional[Any] = mlp_ratio
__magic_name__ : Dict = qkv_bias
__magic_name__ : Dict = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : int = use_absolute_embeddings
__magic_name__ : Dict = patch_norm
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[int] = is_training
__magic_name__ : Optional[Any] = scope
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = encoder_stride
__magic_name__ : List[Any] = out_features
__magic_name__ : Union[str, Any] = out_indices
def __magic_name__ ( self ) -> str:
__magic_name__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[Any]:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Any = FocalNetModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : Optional[Any] = None
__magic_name__ : List[str] = FocalNetBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[int] = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
__magic_name__ : int = self.type_sequence_label_size
__magic_name__ : Tuple = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = FocalNetForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> int:
__magic_name__ : int = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Dict = config_and_inputs
__magic_name__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
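# The shape checks above follow standard hierarchical-backbone arithmetic: the
# patch grid gives (image_size // patch_size) ** 2 tokens, and each later stage
# quarters the token count while doubling the channel width. A standalone
# sketch using this tester's defaults:
def _focalnet_shape_sketch(image_size=32, patch_size=2, embed_dim=16, depths=(1, 2, 1)):
    num_patches = (image_size // patch_size) ** 2
    expected_seq_len = num_patches // (4 ** (len(depths) - 1))
    expected_dim = embed_dim * 2 ** (len(depths) - 1)
    return expected_seq_len, expected_dim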
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__ : Any = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Dict = False
lowercase__ : int = False
lowercase__ : Tuple = False
lowercase__ : Optional[Any] = False
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = FocalNetModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> List[str]:
return
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : str = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# FocalNet has a different seq_length
__magic_name__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = reshaped_hidden_states[0].shape
__magic_name__ : Union[str, Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__magic_name__ : List[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Optional[Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = 3
__magic_name__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : str = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[int] = FocalNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Dict = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : int = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.default_image_processor
__magic_name__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__magic_name__ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[Any] = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__ : Optional[int] = FocalNetConfig
lowercase__ : Dict = False
def __magic_name__ ( self ) -> int:
__magic_name__ : Dict = FocalNetModelTester(self )
| 342 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class snake_case__ ( nn.Module ):
lowercase__ : int
lowercase__ : jnp.dtype = jnp.floataa
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Tuple:
__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ : Any = hidden_states.shape
__magic_name__ : str = jax.image.resize(
lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
__magic_name__ : List[str] = self.conv(lowerCAmelCase__ )
return hidden_states
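# A standalone sketch of the nearest-neighbour upsampling step above, assuming
# NHWC tensors as used throughout this module: height and width are doubled
# before the 3x3 convolution refines the result.
def _nearest_upsample_sketch(hidden_states):
    batch, height, width, channels = hidden_states.shape
    return jax.image.resize(
        hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
    )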
class snake_case__ ( nn.Module ):
lowercase__ : int
lowercase__ : jnp.dtype = jnp.floataa
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Dict:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__magic_name__ : str = self.conv(lowerCAmelCase__ )
return hidden_states
class snake_case__ ( nn.Module ):
lowercase__ : int
lowercase__ : int = None
lowercase__ : float = 0.0
lowercase__ : bool = None
lowercase__ : jnp.dtype = jnp.floataa
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : List[str] = self.in_channels if self.out_channels is None else self.out_channels
__magic_name__ : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__magic_name__ : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__magic_name__ : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype )
__magic_name__ : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__magic_name__ : int = nn.Dropout(self.dropout_prob )
__magic_name__ : Dict = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__magic_name__ : Dict = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__magic_name__ : Dict = None
if use_nin_shortcut:
__magic_name__ : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> Tuple:
__magic_name__ : int = hidden_states
__magic_name__ : str = self.norma(lowerCAmelCase__ )
__magic_name__ : Optional[int] = nn.swish(lowerCAmelCase__ )
__magic_name__ : Dict = self.conva(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 )
__magic_name__ : Union[str, Any] = hidden_states + temb
__magic_name__ : Union[str, Any] = self.norma(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = nn.swish(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : str = self.conva(lowerCAmelCase__ )
if self.conv_shortcut is not None:
__magic_name__ : Any = self.conv_shortcut(lowerCAmelCase__ )
return hidden_states + residual
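# A sketch of the time-embedding injection above: the projected embedding of
# shape (batch, channels) is expanded to (batch, 1, 1, channels) so that it
# broadcasts over the spatial dims of the NHWC hidden states.
def _add_time_embedding_sketch(hidden_states, temb):
    temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)  # (B, C) -> (B, 1, 1, C)
    return hidden_states + temb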
| 342 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]:
__magic_name__ : int = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = image_size
__magic_name__ : str = num_channels
__magic_name__ : Dict = patch_size
__magic_name__ : Tuple = num_frames
__magic_name__ : List[Any] = is_training
__magic_name__ : List[Any] = use_labels
__magic_name__ : Dict = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Tuple = attention_type
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[Any] = scope
__magic_name__ : Tuple = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__magic_name__ : str = (image_size // patch_size) ** 2
__magic_name__ : Any = (num_frames) * self.num_patches_per_frame + 1
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> str:
__magic_name__ : Dict = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__magic_name__ : Optional[Any] = self.num_labels
return config
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
__magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
# verify the logits shape
__magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Any:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs
__magic_name__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
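# A sketch of the token-count arithmetic used above: each frame contributes
# (image_size // patch_size) ** 2 patch tokens, and a single CLS token is
# prepended for the whole clip.
def _timesformer_seq_len_sketch(image_size=10, patch_size=2, num_frames=2):
    num_patches_per_frame = (image_size // patch_size) ** 2
    return num_frames * num_patches_per_frame + 1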
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : str = False
lowercase__ : Tuple = False
lowercase__ : Any = False
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : List[Any] = TimesformerModelTester(self )
__magic_name__ : List[str] = ConfigTester(
self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]:
__magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __magic_name__ ( self ) -> str:
pass
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Optional[int] = [*signature.parameters.keys()]
__magic_name__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Optional[int]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
if not self.has_attentions:
pass
else:
__magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
for model_class in self.all_model_classes:
__magic_name__ : Tuple = self.model_tester.seq_length
__magic_name__ : int = self.model_tester.num_frames
__magic_name__ : Any = True
__magic_name__ : Tuple = False
__magic_name__ : Optional[int] = True
__magic_name__ : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : List[str] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[Any] = True
__magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : int = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__magic_name__ : Union[str, Any] = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : Optional[Any] = True
__magic_name__ : int = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
__magic_name__ : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __magic_name__ ( self ) -> Any:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : str = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__magic_name__ : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" )
__magic_name__ : List[str] = np.load(_A )
return list(_A )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowerCAmelCase__ )
__magic_name__ : str = self.default_image_processor
__magic_name__ : Any = prepare_video()
__magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Optional[int] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 342 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class snake_case__ :
    def __init__( self , list_of_points ) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t ) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i (the Bernstein polynomial B_{i,n}(t))
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t ) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size = 0.0_1 ) -> None:
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x , y , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    snake_case__([(1, 2), (3, 5)]).plot_curve()  # degree 1
    snake_case__([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    snake_case__([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
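    # Added sanity check (no plotting): a degree-1 Bezier curve is plain linear
    # interpolation, so the point at t = 0.5 is the midpoint of the control points.
    assert snake_case__([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)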
| 342 |
def UpperCamelCase ( graph ):
    """simple docstring"""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
__magic_name__: dict = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
UpperCamelCase(__magic_name__)
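# The call above prints 5: the longest vertex chain in this DAG is
# 0 -> 2 -> 5 -> 6 -> 7 (distances count vertices, starting from 1).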
| 342 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase ( _A, _A=0.999, _A="cosine", ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_A ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_A ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
__magic_name__ : Union[str, Any] = []
for i in range(_A ):
__magic_name__ : Optional[int] = i / num_diffusion_timesteps
__magic_name__ : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_A ) / alpha_bar_fn(_A ), _A ) )
return torch.tensor(_A, dtype=torch.floataa )
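# Illustrative sketch (added, not part of the original library file): the helper
# above computes beta_i = 1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), clipped
# at max_beta. The name `_cosine_betas_sketch` is ours, chosen only for clarity.
def _cosine_betas_sketch(N=10_00, max_beta=0.9_9_9):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [min(1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), max_beta) for i in range(N)]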
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase ):
lowercase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
lowercase__ : Tuple = 2
@register_to_config
def __init__( self , lowerCAmelCase__ = 10_00 , lowerCAmelCase__ = 0.0_0_0_8_5 , lowerCAmelCase__ = 0.0_1_2 , lowerCAmelCase__ = "linear" , lowerCAmelCase__ = None , lowerCAmelCase__ = "epsilon" , lowerCAmelCase__ = "linspace" , lowerCAmelCase__ = 0 , ) -> List[str]:
if trained_betas is not None:
__magic_name__ : Tuple = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__magic_name__ : Optional[Any] = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__magic_name__ : List[str] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__magic_name__ : Optional[int] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
__magic_name__ : List[str] = 1.0 - self.betas
__magic_name__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
if schedule_timesteps is None:
__magic_name__ : Union[str, Any] = self.timesteps
__magic_name__ : Dict = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__magic_name__ : Tuple = 1 if len(lowerCAmelCase__ ) > 1 else 0
else:
__magic_name__ : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__ ) else timestep
__magic_name__ : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __magic_name__ ( self ) -> Optional[int]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , ) -> torch.FloatTensor:
__magic_name__ : Union[str, Any] = self.index_for_timestep(lowerCAmelCase__ )
if self.state_in_first_order:
__magic_name__ : Dict = self.sigmas[step_index]
else:
__magic_name__ : List[Any] = self.sigmas_interpol[step_index]
__magic_name__ : str = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> int:
__magic_name__ : str = num_inference_steps
__magic_name__ : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__magic_name__ : str = np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase__ , dtype=lowerCAmelCase__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__magic_name__ : Dict = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__magic_name__ : Tuple = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__magic_name__ : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__magic_name__ : Tuple = (np.arange(lowerCAmelCase__ , 0 , -step_ratio )).round().copy().astype(lowerCAmelCase__ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__magic_name__ : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__magic_name__ : Union[str, Any] = torch.from_numpy(np.log(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__magic_name__ : Tuple = np.interp(lowerCAmelCase__ , np.arange(0 , len(lowerCAmelCase__ ) ) , lowerCAmelCase__ )
__magic_name__ : Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__magic_name__ : str = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ )
# interpolate sigmas
__magic_name__ : Any = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__magic_name__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__magic_name__ : Union[str, Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCAmelCase__ ).startswith("""mps""" ):
# mps does not support float64
__magic_name__ : Any = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ , dtype=torch.floataa )
else:
__magic_name__ : Union[str, Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
# interpolate timesteps
__magic_name__ : List[Any] = self.sigma_to_t(lowerCAmelCase__ ).to(lowerCAmelCase__ , dtype=timesteps.dtype )
__magic_name__ : Any = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__magic_name__ : Optional[int] = torch.cat([timesteps[:1], interleaved_timesteps] )
__magic_name__ : Any = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__magic_name__ : List[str] = defaultdict(lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
# get log sigma
__magic_name__ : str = sigma.log()
# get distribution
__magic_name__ : str = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__magic_name__ : Union[str, Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__magic_name__ : List[str] = low_idx + 1
__magic_name__ : Optional[int] = self.log_sigmas[low_idx]
__magic_name__ : List[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
__magic_name__ : Union[str, Any] = (low - log_sigma) / (low - high)
__magic_name__ : str = w.clamp(0 , 1 )
# transform interpolation to time range
__magic_name__ : Any = (1 - w) * low_idx + w * high_idx
__magic_name__ : List[Any] = t.view(sigma.shape )
return t
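    # Note (added): the lookup above is log-linear interpolation of the timestep
    # index, i.e. w = (log s_low - log sigma) / (log s_low - log s_high) and
    # t = (1 - w) * low_idx + w * high_idx, so t can fall between grid points.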
@property
def __magic_name__ ( self ) -> List[str]:
return self.sample is None
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[SchedulerOutput, Tuple]:
__magic_name__ : Optional[int] = self.index_for_timestep(lowerCAmelCase__ )
# advance index counter by 1
__magic_name__ : Optional[int] = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__magic_name__ : List[Any] = self.sigmas[step_index]
__magic_name__ : List[Any] = self.sigmas_interpol[step_index + 1]
__magic_name__ : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__magic_name__ : int = self.sigmas[step_index - 1]
__magic_name__ : Tuple = self.sigmas_interpol[step_index]
__magic_name__ : Optional[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__magic_name__ : Optional[Any] = 0
__magic_name__ : str = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__magic_name__ : Any = sigma_hat if self.state_in_first_order else sigma_interpol
__magic_name__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__magic_name__ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
__magic_name__ : int = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__magic_name__ : int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__magic_name__ : str = sigma_interpol - sigma_hat
# store for 2nd order step
__magic_name__ : Tuple = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__magic_name__ : Optional[int] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__magic_name__ : List[str] = sigma_next - sigma_hat
__magic_name__ : Tuple = self.sample
__magic_name__ : Tuple = None
__magic_name__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__magic_name__ : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase__ ):
# mps does not support float64
__magic_name__ : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__magic_name__ : Any = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__magic_name__ : int = self.timesteps.to(original_samples.device )
__magic_name__ : Optional[int] = timesteps.to(original_samples.device )
__magic_name__ : Dict = [self.index_for_timestep(lowerCAmelCase__ , lowerCAmelCase__ ) for t in timesteps]
__magic_name__ : List[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__magic_name__ : int = sigma.unsqueeze(-1 )
__magic_name__ : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Any:
return self.config.num_train_timesteps
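# A minimal usage sketch (added; illustrative only). The method names below are
# the standard diffusers scheduler API (set_timesteps / scale_model_input / step),
# to which the obfuscated `__magic_name__` definitions above correspond:
# scheduler = snake_case__(num_train_timesteps=1000)
# scheduler.set_timesteps(num_inference_steps=50)
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = unet(model_input, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample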
| 342 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> str:
__magic_name__ : Tuple = """ylacombe/bark-small"""
__magic_name__ : List[str] = tempfile.mkdtemp()
__magic_name__ : Optional[Any] = """en_speaker_1"""
__magic_name__ : Union[str, Any] = """This is a test string"""
__magic_name__ : Optional[int] = """speaker_embeddings_path.json"""
__magic_name__ : Any = """speaker_embeddings"""
def __magic_name__ ( self , **lowerCAmelCase__ ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Optional[Any] = self.get_tokenizer()
__magic_name__ : int = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__magic_name__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ : str = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__ ( self ) -> Any:
__magic_name__ : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__magic_name__ : Union[str, Any] = 35
__magic_name__ : List[Any] = 2
__magic_name__ : Dict = 8
__magic_name__ : Tuple = {
"""semantic_prompt""": np.ones(lowerCAmelCase__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__magic_name__ : Optional[int] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__magic_name__ : Dict = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__magic_name__ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__magic_name__ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : str = self.get_tokenizer()
__magic_name__ : Dict = BarkProcessor(tokenizer=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = processor(text=self.input_string )
__magic_name__ : List[Any] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 342 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 32 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=3 , ) -> Union[str, Any]:
__magic_name__ : str = parent
__magic_name__ : Dict = do_resize
__magic_name__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_88}
__magic_name__ : Union[str, Any] = size_divisor
__magic_name__ : Union[str, Any] = do_rescale
__magic_name__ : Dict = rescale_factor
__magic_name__ : Union[str, Any] = do_normalize
__magic_name__ : List[str] = do_center_crop
__magic_name__ : Tuple = image_mean
__magic_name__ : Tuple = image_std
__magic_name__ : Tuple = do_pad
__magic_name__ : int = batch_size
__magic_name__ : List[Any] = num_channels
__magic_name__ : int = min_resolution
__magic_name__ : str = max_resolution
def __magic_name__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
if not batched:
__magic_name__ : Dict = self.size["""shortest_edge"""]
__magic_name__ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__magic_name__ ,__magic_name__ : List[Any] = image.size
else:
__magic_name__ ,__magic_name__ : Dict = image.shape[1], image.shape[2]
__magic_name__ : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
__magic_name__ ,__magic_name__ : str = size, scale * w
else:
__magic_name__ ,__magic_name__ : Optional[Any] = scale * h, size
__magic_name__ : Tuple = int((13_33 / 8_00) * size )
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
__magic_name__ : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = newh * scale
__magic_name__ : Any = neww * scale
__magic_name__ ,__magic_name__ : str = int(newh + 0.5 ), int(neww + 0.5 )
__magic_name__ ,__magic_name__ : int = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__magic_name__ : Union[str, Any] = []
for image in image_inputs:
__magic_name__ ,__magic_name__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
__magic_name__ : Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
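# Worked example for the resize logic above (added): with shortest_edge = 288 and
# size_divisor = 32, a 480x640 image scales by 288 / 480 = 0.6 to 288x384;
# max(288, 384) <= int(1333 / 800 * 288) = 479, and both sides are already
# multiples of 32, so the expected output size is (288, 384).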
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : int = BridgeTowerImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = BridgeTowerImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Any:
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )
def __magic_name__ ( self ) -> Optional[int]:
pass
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image processor
__magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __magic_name__ ( self ) -> str:
# Initialize image processor
__magic_name__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 342 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=[0.5, 0.5, 0.5] , lowerCAmelCase__=[0.5, 0.5, 0.5] , ) -> Optional[int]:
__magic_name__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
__magic_name__ : str = parent
__magic_name__ : Any = batch_size
__magic_name__ : Any = num_channels
__magic_name__ : List[str] = image_size
__magic_name__ : Tuple = min_resolution
__magic_name__ : Union[str, Any] = max_resolution
__magic_name__ : List[str] = do_resize
__magic_name__ : Optional[Any] = size
__magic_name__ : Optional[Any] = do_normalize
__magic_name__ : Any = image_mean
__magic_name__ : List[str] = image_std
def __magic_name__ ( self ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case__ ( _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Any = DPTImageProcessor if is_vision_available() else None
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Dict = DPTImageProcessingTester(self )
@property
def __magic_name__ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__magic_name__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __magic_name__ ( self ) -> str:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__magic_name__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Tuple:
# Initialize image_processing
__magic_name__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : int = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __magic_name__ ( self ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 342 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
__magic_name__: Union[str, Any] = logging.get_logger(__name__)
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Dict = UniSpeechSatForSequenceClassification.from_pretrained(_A, config=_A )
__magic_name__ : int = downstream_dict["""projector.weight"""]
__magic_name__ : int = downstream_dict["""projector.bias"""]
__magic_name__ : int = downstream_dict["""model.post_net.linear.weight"""]
__magic_name__ : List[Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : List[str] = UniSpeechSatForAudioFrameClassification.from_pretrained(_A, config=_A )
__magic_name__ : Optional[int] = downstream_dict["""model.linear.weight"""]
__magic_name__ : Any = downstream_dict["""model.linear.bias"""]
return model
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
__magic_name__ : str = UniSpeechSatForXVector.from_pretrained(_A, config=_A )
__magic_name__ : Optional[Any] = downstream_dict["""connector.weight"""]
__magic_name__ : List[str] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__magic_name__ : List[Any] = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
__magic_name__ : Any = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
__magic_name__ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
__magic_name__ : Dict = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
__magic_name__ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
__magic_name__ : List[Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
__magic_name__ : int = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
__magic_name__ : Any = torch.load(_A, map_location="""cpu""" )
__magic_name__ : Dict = checkpoint["""Downstream"""]
__magic_name__ : Dict = UniSpeechSatConfig.from_pretrained(_A )
__magic_name__ : int = WavaVecaFeatureExtractor.from_pretrained(
_A, return_attention_mask=_A, do_normalize=_A )
__magic_name__ : Dict = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
__magic_name__ : List[str] = convert_classification(_A, _A, _A )
elif arch.endswith("""ForAudioFrameClassification""" ):
__magic_name__ : List[Any] = convert_diarization(_A, _A, _A )
elif arch.endswith("""ForXVector""" ):
__magic_name__ : Optional[Any] = convert_xvector(_A, _A, _A )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
__magic_name__ : str = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(_A )
hf_model.save_pretrained(_A )
if __name__ == "__main__":
__magic_name__: Tuple = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__magic_name__: List[Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
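# Example invocation (added; the script name and all paths are placeholders):
# python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base-plus \
#     --config_path ./config.json \
#     --checkpoint_path ./downstream_checkpoint.ckpt \
#     --model_dump_path ./converted_model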
| 342 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__magic_name__: Tuple = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : List[str] = '''facebook/nllb-200-distilled-600M'''
lowercase__ : List[Any] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
lowercase__ : List[str] = '''translator'''
lowercase__ : Optional[Any] = AutoTokenizer
lowercase__ : int = AutoModelForSeqaSeqLM
lowercase__ : List[Any] = LANGUAGE_CODES
lowercase__ : str = ['''text''', '''text''', '''text''']
lowercase__ : Any = ['''text''']
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
__magic_name__ : Tuple = self.lang_to_code[src_lang]
__magic_name__ : Dict = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCAmelCase__ , return_tensors="""pt""" , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.model.generate(**lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Dict:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase__ )
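# A minimal usage sketch (added; illustrative only — the call below is the
# standard Transformers agents Tool interface, and the obfuscated methods above
# correspond to encode / forward / decode):
# translator = snake_case__()
# translator.setup()
# print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))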
| 342 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__: Optional[int] = logging.get_logger(__name__)
__magic_name__: Union[str, Any] = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : List[Any] = '''open-llama'''
def __init__( self , lowerCAmelCase__=10_00_00 , lowerCAmelCase__=40_96 , lowerCAmelCase__=1_10_08 , lowerCAmelCase__=32 , lowerCAmelCase__=32 , lowerCAmelCase__="silu" , lowerCAmelCase__=20_48 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-6 , lowerCAmelCase__=True , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[Any]:
__magic_name__ : int = vocab_size
__magic_name__ : List[str] = max_position_embeddings
__magic_name__ : str = hidden_size
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : int = num_hidden_layers
__magic_name__ : List[Any] = num_attention_heads
__magic_name__ : Tuple = hidden_act
__magic_name__ : str = initializer_range
__magic_name__ : int = rms_norm_eps
__magic_name__ : Dict = use_cache
__magic_name__ : Optional[int] = kwargs.pop(
"""use_memorry_efficient_attention""" , lowerCAmelCase__ )
__magic_name__ : Optional[int] = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_dropout_prob
__magic_name__ : List[Any] = use_stable_embedding
__magic_name__ : List[str] = shared_input_output_embedding
__magic_name__ : str = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __magic_name__ ( self ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F'got {self.rope_scaling}' )
__magic_name__ : Optional[Any] = self.rope_scaling.get("""type""" , lowerCAmelCase__ )
__magic_name__ : Optional[int] = self.rope_scaling.get("""factor""" , lowerCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
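    # Example of a value that passes the validation above (added for clarity):
    #   config.rope_scaling = {"type": "linear", "factor": 2.0}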
| 342 |
import math
class snake_case__ :
    def __init__( self , n=0 ) -> None:  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ) -> None:
        self.dp[u][v] = w
    def floyd_warshall( self ) -> None:
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ) -> float:
        return self.dp[u][v]
if __name__ == "__main__":
    graph = snake_case__(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
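    # With the edges above this prints 11 (shortest path 1 -> 3 -> 4)
    # and 16 (shortest path 0 -> 2 -> 3).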
| 342 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: Optional[Any] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[int] = '''mctct'''
def __init__( self , lowerCAmelCase__=80_65 , lowerCAmelCase__=15_36 , lowerCAmelCase__=36 , lowerCAmelCase__=61_44 , lowerCAmelCase__=4 , lowerCAmelCase__=3_84 , lowerCAmelCase__=9_20 , lowerCAmelCase__=1e-5 , lowerCAmelCase__=0.3 , lowerCAmelCase__="relu" , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=0.3 , lowerCAmelCase__=0.3 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0.3 , lowerCAmelCase__=1 , lowerCAmelCase__=(7,) , lowerCAmelCase__=(3,) , lowerCAmelCase__=80 , lowerCAmelCase__=1 , lowerCAmelCase__=None , lowerCAmelCase__="sum" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Dict:
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
__magic_name__ : int = vocab_size
__magic_name__ : int = hidden_size
__magic_name__ : int = num_hidden_layers
__magic_name__ : str = intermediate_size
__magic_name__ : Optional[int] = num_attention_heads
__magic_name__ : int = attention_head_dim
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : List[Any] = layer_norm_eps
__magic_name__ : List[str] = layerdrop
__magic_name__ : Dict = hidden_act
__magic_name__ : int = initializer_range
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : Union[str, Any] = pad_token_id
__magic_name__ : int = bos_token_id
__magic_name__ : List[Any] = eos_token_id
__magic_name__ : List[Any] = conv_glu_dim
__magic_name__ : str = conv_dropout
__magic_name__ : List[Any] = num_conv_layers
__magic_name__ : Any = input_feat_per_channel
__magic_name__ : Optional[Any] = input_channels
__magic_name__ : int = conv_channels
__magic_name__ : List[Any] = ctc_loss_reduction
__magic_name__ : str = ctc_zero_infinity
# prevents config testing fail with exporting to json
__magic_name__ : Union[str, Any] = list(lowerCAmelCase__ )
__magic_name__ : List[Any] = list(lowerCAmelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
F'`config.num_conv_layers = {self.num_conv_layers}`.' )
| 342 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class snake_case__ :
def __init__( self , lowerCAmelCase__ = None ) -> None:
if components is None:
__magic_name__ : Any = []
__magic_name__ : List[str] = list(lowerCAmelCase__ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"
def __add__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : Dict = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self , lowerCAmelCase__ ) -> Vector:
__magic_name__ : int = len(self )
if size == len(lowerCAmelCase__ ):
__magic_name__ : str = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> float:
...
def __mul__( self , lowerCAmelCase__ ) -> float | Vector:
if isinstance(lowerCAmelCase__ , (float, int) ):
__magic_name__ : Optional[Any] = [c * other for c in self.__components]
return Vector(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
__magic_name__ : Optional[Any] = len(self )
__magic_name__ : List[Any] = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return sum(lowerCAmelCase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __magic_name__ ( self ) -> Vector:
return Vector(self.__components )
def __magic_name__ ( self , lowerCAmelCase__ ) -> float:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__magic_name__ : Optional[int] = value
def __magic_name__ ( self ) -> float:
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__magic_name__ : Dict = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase__ ) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> float:
__magic_name__ : Optional[Any] = self * other
__magic_name__ : List[str] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
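    # Worked example (added): for x = (1, 0) and y = (1, 1), x * y = 1 while the
    # euclidean lengths are 1 and sqrt(2), so the angle is acos(1 / sqrt(2)),
    # i.e. pi / 4 (45 degrees).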
def UpperCamelCase ( _A ):
"""simple docstring"""
assert isinstance(_A, _A )
return Vector([0] * dimension )
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
assert isinstance(_A, _A ) and (isinstance(_A, _A ))
__magic_name__ : Union[str, Any] = [0] * dimension
__magic_name__ : Optional[int] = 1
return Vector(_A )
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
assert (
isinstance(_A, _A )
and isinstance(_A, _A )
and (isinstance(_A, (int, float) ))
)
return x * scalar + y
def UpperCamelCase ( _A, _A, _A ):
"""simple docstring"""
random.seed(_A )
__magic_name__ : Union[str, Any] = [random.randint(_A, _A ) for _ in range(_A )]
return Vector(_A )
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : Dict = matrix
__magic_name__ : Tuple = w
__magic_name__ : Union[str, Any] = h
def __str__( self ) -> str:
__magic_name__ : Dict = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__(self, other: Matrix) -> Matrix:
        """component-wise matrix addition; dimensions must match"""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """component-wise matrix subtraction; dimensions must match"""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector multiplication
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar multiplication
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        raise Exception("invalid operand!")
    def height(self) -> int:
        """returns the height (number of rows) of the matrix"""
        return self.__height

    def width(self) -> int:
        """returns the width (number of columns) of the matrix"""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """returns the component at row x, column y"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """sets the component at row x, column y to value"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """returns the minor: determinant of the submatrix without row x / column y"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """returns the cofactor: (-1)**(x + y) times the minor"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """returns the determinant via Laplace expansion along the first row"""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactors = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactors)
def square_zero_matrix(n: int) -> Matrix:
    """returns a square zero matrix of dimension n x n"""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """returns a width x height matrix with random integer components between a and b"""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
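

# A minimal usage sketch (illustrative, not part of the original module); it
# assumes the Vector constructor and __str__ defined earlier in this file.
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)  # component-wise sum
    print(v * w)  # dot product: 1*4 + 2*5 + 3*6 = 32
    print(v.euclidean_length())  # sqrt(1 + 4 + 9) = sqrt(14)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m * Vector([1, 1]))  # matrix-vector product
    print(m.determinant())  # 1*4 - 2*3 = -2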
| 342 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
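

# Illustrative usage sketch (not part of the original module): this formatter
# is normally selected through `Dataset.with_format`; the toy dataset below is
# an assumption for the example.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#   ds = ds.with_format("jax")
#   print(ds[0]["x"])  # a jax.Array produced by JaxFormatter.format_row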
| 342 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
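

# Illustrative usage sketch (not part of the original file; the checkpoint name
# comes from the pretrained map above, the sentence is an arbitrary example):
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # in default (non-legacy) mode, input_ids = [eng_Latn code, ..., </s>]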
| 342 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
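

# A note on running this file: it is a standard unittest module, normally
# executed through pytest from the transformers repository root (the path
# below follows the usual layout and is an assumption here):
#
#   python -m pytest tests/models/regnet/test_modeling_tf_regnet.py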
| 342 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Converts a TensorFlow MobileBERT checkpoint into a PyTorch state dict."""
    # Initialise PyTorch model from the json config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__magic_name__: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
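
    # Example invocation (the script filename and paths are illustrative
    # placeholders, not taken from this file):
    #
    #   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./mobilebert/model.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin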
| 342 | 1 |
def solution(n: int = 2000000) -> int:
    """Returns the sum of all primes below n using a sieve of Eratosthenes.

    In primality_list, 0 marks "still presumed prime" and 1 marks "composite".
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # mark every multiple of the prime i, starting at i*i, as composite
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
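
    # Quick sanity check (illustrative): the primes below 10 are 2, 3, 5 and 7,
    # so solution(10) should return 17; the Project Euler answer uses the
    # default limit of two million.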
| 342 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoded_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
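

# Illustrative usage sketch (the checkpoint name is the published MGP-STR
# model, included here as an assumption, not taken from this file):
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tokenizer("tester")["input_ids"]
#   print(tokenizer.decode(ids))  # -> "tester"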
| 342 | 1 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrapes Amazon India search results for the given product into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # normalise empty fields to a blank placeholder
    data_frame.loc[
        data_frame["Current Price of the product"] == "", "Current Price of the product"
    ] = " "
    data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__magic_name__: int = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
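
    # The scraper depends on Amazon's current HTML structure and may break when
    # the markup changes; a quick result check (illustrative):
    #
    #   df = get_amazon_product_data("headphones")
    #   print(df.head())  # first few scraped rows
    #   print(df.sort_values("Discount", ascending=False).head())  # best discounts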
| 342 |
import re
def indian_phone_validator(phone: str) -> bool:
    """Returns True if the string is a valid Indian mobile number, else False."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
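
    # A few illustrative inputs against the pattern above:
    #
    #   indian_phone_validator("+918827897895")  # True: +91 prefix, starts with 8
    #   indian_phone_validator("9876543210")     # True: bare 10-digit number
    #   indian_phone_validator("123456789")      # False: wrong leading digit/length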
| 342 | 1 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve |Z| = sqrt(R**2 + X**2) for whichever quantity is given as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
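
    # Worked example (illustrative): a 3 ohm resistance in series with a
    # 4 ohm reactance gives |Z| = sqrt(3**2 + 4**2) = 5, so:
    #
    #   electrical_impedance(3, 4, 0)  # -> {"impedance": 5.0}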
| 342 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Computes the circular convolution of two discrete signals."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """Performs circular convolution using the circulant-matrix method."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the circulant matrix is the second signal rotated by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
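
    # Illustrative run (the names follow the definitions above):
    #
    #   convolver = CircularConvolution()
    #   print(convolver.circular_convolution())  # expected: [10, 10, 6, 14]
    #
    # e.g. the first entry is 2*1 + 1*4 + 2*3 + (-1)*2 = 10.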
| 342 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over a set of 2D control points, via Bernstein polynomials."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Returns all Bernstein basis polynomials evaluated at t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Returns the point on the curve at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plots the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
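
    # basis_function above evaluates the Bernstein polynomials
    #   b(i, n)(t) = C(n, i) * (1 - t)**(n - i) * t**i,
    # which are non-negative on [0, 1] and sum to 1, so every curve point is a
    # convex combination of the control points.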
| 0 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Returns the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
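
    # Worked check of the printed example: C(4, 2) = 6, and
    # 6 * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.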
| 342 | 0 |