"""simple docstring"""
from math import sqrt
def __a ( A ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number."""
    count = 0
    number = 1
    # handle 2 first, then only test odd candidates
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
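    # Quick check (a sketch, not part of the original solution): per the
    # Project Euler statement, the 6th prime is 13.
    assert solution(6) == 13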
"""Project Euler problem 20: sum of the digits of 100 factorial."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of ``num`` factorial."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
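    # Post-run sanity check (a sketch, not part of the original script): 10! is
    # 3628800, whose digits sum to 27.
    assert solution(10) == 27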
"""Random graph generator based on the Erdos-Renyi G(n, p) model."""
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph on ``vertices_number`` vertices, adding each
    possible edge independently with the given ``probability``."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each pair of vertices, add an edge from i to j when the randomly
    # drawn number falls below the requested probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the reverse edge j -> i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph on ``vertices_number`` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
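    # Small demonstration (a sketch, not in the original file): seeding the RNG
    # makes the random graph reproducible, and probability >= 1 falls back to a
    # complete graph.
    random.seed(1)
    print(random_graph(4, 0.5))
    assert random_graph(3, 1) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}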
"""Naive (brute-force) pattern search over a string."""


def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index of ``s`` at which ``pattern`` starts."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
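    # Overlapping occurrences are reported as well (a sketch, not in the
    # original file):
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]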
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase__ = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
"""Tests for the ImageGPT image processor."""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : int = 4000000 ) -> int:
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Union[str, Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCAmelCase )
_UpperCAmelCase : Optional[Any] = b, a + b
return sum(_UpperCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
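    # Sanity check (a sketch, not in the original file): the even Fibonacci
    # numbers not exceeding 10 are 2 and 8, so solution(10) == 10.
    assert solution(10) == 10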
"""Dynamic-programming solution to the subset-sum decision problem."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to exactly ``required_sum``."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
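    # Quick examples (a sketch, not in the original file): every element of
    # [2, 4, 6, 8] is even, so no subset sums to 5, while 2 + 4 + 8 == 14.
    assert is_sum_subset([2, 4, 6, 8], 5) is False
    assert is_sum_subset([2, 4, 6, 8], 14) is True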
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
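    # Example invocation (a sketch; the dataset name and output directory are
    # placeholders, not part of this script):
    #
    #   python run_image_classification.py \
    #       --dataset_name beans \
    #       --output_dir ./beans_outputs \
    #       --do_train --do_eval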
"""Project Euler problem 2: sum the even-valued Fibonacci terms below a limit."""


def solution(n: int = 4000000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops  # isort: skip
"""Project Euler problem 25: index of the first Fibonacci number with 1000 digits."""


def fibonacci(n: int) -> int:
    """Return the ``n``-th Fibonacci number (0 for invalid input)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
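    # Quick check (a sketch, not in the original file): the first Fibonacci
    # number with three digits is F(12) = 144, so solution(3) == 12.
    assert solution(3) == 12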
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (GluonNLP) to our BERT structure."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
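    # Example invocation (a sketch; both paths are placeholders, and the file
    # name assumes the usual Transformers conversion-script naming):
    #
    #   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
    #       --bort_checkpoint_path ./bort.params \
    #       --pytorch_dump_folder_path ./bort-pytorch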
"""Convert a RemBERT TensorFlow checkpoint to PyTorch."""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
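    # Example invocation (a sketch; all paths are placeholders):
    #
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./rembert/model.ckpt \
    #       --rembert_config_file ./rembert/config.json \
    #       --pytorch_dump_path ./rembert-pytorch/pytorch_model.bin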
"""simple docstring"""
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
assert x is not None
assert y is not None
SCREAMING_SNAKE_CASE__ = len(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = len(_UpperCAmelCase )
# declaring the array for storing the dp values
SCREAMING_SNAKE_CASE__ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ = 1 if x[i - 1] == y[j - 1] else 0
SCREAMING_SNAKE_CASE__ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
SCREAMING_SNAKE_CASE__ = """"""
SCREAMING_SNAKE_CASE__ = m, n
while i > 0 and j > 0:
SCREAMING_SNAKE_CASE__ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
SCREAMING_SNAKE_CASE__ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
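    # One more check (a sketch, not in the original file): "AC" is the longest
    # common subsequence of "ABC" and "AC".
    assert longest_common_subsequence("ABC", "AC") == (2, "AC")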
"""Tokenization classes for the BARTpho-syllable model."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
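
# Usage sketch (hedged): assumes the enclosing class is a BARTpho-style
# SentencePiece tokenizer defined earlier in this module; the class name and
# checkpoint id below are illustrative assumptions, not taken from this file.
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]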
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
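
# Usage sketch (hedged): the benchmark can also be driven outside unittest;
# argument values mirror the tests above and are illustrative only.
#   args = TensorFlowBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, sequence_lengths=[8],
#       batch_sizes=[1], multi_process=False,
#   )
#   print(TensorFlowBenchmark(args).run())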
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
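
# Usage sketch (hedged): downstream code imports these re-exports from the
# package namespace rather than from the private submodules, e.g.
#   from accelerate.utils import set_seed, send_to_device
#   set_seed(42)
# Whether the optional groups above resolve depends on the installed extras.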
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the fully reduced numerator/denominator of x + y + z."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
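    # Worked example (hedged): add_three reduces a three-term fraction sum, e.g.
    #   add_three(1, 2, 1, 3, 1, 6)  # 1/2 + 1/3 + 1/6 == 1 -> returns (1, 1)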
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creating a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
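    # Worked example (hedged): for x + y = 4 and x - y = 2,
    #   solve_simultaneous([[1, 1, 4], [1, -1, 2]])  # -> [3.0, 1.0]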
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'''
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UPERNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.", UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.backbone = AutoBackbone.from_config(config.backbone_config)
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values=None, output_attentions=None, output_hidden_states=None, labels=None, return_dict=None,):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
        )
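
# Usage sketch (hedged): end-to-end inference with the class defined above; the
# checkpoint id comes from the archive list at the top of this module, while the
# AutoImageProcessor pairing is an assumption about the companion preprocessor.
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   logits = model(**processor(images=image, return_tensors="pt")).logits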
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
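
# Usage sketch (hedged): constructing the config defined above with a deeper
# DOM/XPath embedding table; keyword names follow the __init__ signature.
#   config = MarkupLMConfig(max_depth=64)
#   assert config.xpath_unit_hidden_size == 32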
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
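
# Usage sketch (hedged): inspecting the declared ONNX inputs; passing the model
# config to the OnnxConfig constructor follows the usual transformers pattern,
# but this pairing is an assumption, not shown elsewhere in this file.
#   onnx_config = XmodOnnxConfig(XmodConfig())
#   print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask axes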
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
    # Left-hand targets follow the SpeechT5HifiGan module layout
    # (conv_pre / upsampler / resblocks / conv_post); the checkpoint keys are
    # taken verbatim from the original generator state dict.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
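    # Example invocation (hedged): the script file name and all paths are
    # placeholders; the flags mirror the argparse definitions above.
    #   python convert_hifigan_checkpoint.py --checkpoint_path generator.ckpt \
    #       --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan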
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
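    # Note (hedged): both converters read checkpoints from the hard-coded local
    # paths above and write into the hub/hopper-medium-v2/ directories created
    # at import time; to also convert the horizon-128 UNet, uncomment:
    #   unet(128)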
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.sourceIndex = None
        self.sinkIndex = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticesCount = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        # accept a single vertex index as well as a list of indices
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.sourceIndex = sources[0]
        self.sinkIndex = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.sourceIndex = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sinkIndex = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.sourceIndex is None or self.sinkIndex is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticesCount
        self.source_index = flow_network.sourceIndex
        self.sink_index = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
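    # Sanity check (hedged): in the 4-vertex graph above the only source-to-sink
    # path is 0 -> 1 -> 2 -> 3, so the expected maximum flow is min(7, 6, 8) == 6.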
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
def A_ ( self , lowercase=0 , **lowercase ):
_lowerCamelCase : int = dict(self.forward_default_kwargs )
_lowerCamelCase : Tuple = kwargs.pop('num_inference_steps' , lowercase )
_lowerCamelCase : str = self.dummy_sample
_lowerCamelCase : Dict = 0.1 * sample
_lowerCamelCase : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : str = self.get_scheduler_config(**lowercase )
_lowerCamelCase : List[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
_lowerCamelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
_lowerCamelCase : List[str] = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
_lowerCamelCase : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCamelCase : Tuple = sample, sample
for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_lowerCamelCase : str = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A_ ( self , lowercase=0 , **lowercase ):
_lowerCamelCase : str = dict(self.forward_default_kwargs )
_lowerCamelCase : List[Any] = kwargs.pop('num_inference_steps' , lowercase )
_lowerCamelCase : Optional[int] = self.dummy_sample
_lowerCamelCase : Union[str, Any] = 0.1 * sample
_lowerCamelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Tuple = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCamelCase : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
_lowerCamelCase : Optional[int] = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCamelCase : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCamelCase : str = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_lowerCamelCase : List[Any] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = dict(self.forward_default_kwargs )
_lowerCamelCase : List[Any] = kwargs.pop('num_inference_steps' , lowercase )
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : int = self.dummy_sample
_lowerCamelCase : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , 'set_timesteps' ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , 'set_timesteps' ):
_lowerCamelCase : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCamelCase : str = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowerCamelCase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCamelCase : Any = scheduler.timesteps[5]
_lowerCamelCase : Any = scheduler.timesteps[6]
_lowerCamelCase : Dict = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
_lowerCamelCase : Union[str, Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCamelCase : List[str] = self.full_loop(scheduler=lowercase )
_lowerCamelCase : Any = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
_lowerCamelCase : Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCamelCase : Dict = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCamelCase : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCamelCase : Optional[int] = self.full_loop(scheduler=lowercase )
_lowerCamelCase : str = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
def A_ ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
self.check_over_configs(thresholding=lowercase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , solver_order=lowercase , solver_type=lowercase , )
def A_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
_lowerCamelCase : Optional[int] = self.full_loop(
solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"
def A_ ( self ):
self.check_over_configs(lower_order_final=lowercase )
self.check_over_configs(lower_order_final=lowercase )
def A_ ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowercase , time_step=0 )
def A_ ( self ):
_lowerCamelCase : List[str] = self.full_loop()
_lowerCamelCase : str = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : int = self.full_loop(prediction_type='v_prediction' )
_lowerCamelCase : Tuple = torch.mean(torch.abs(lowercase ) )
assert abs(result_mean.item() - 0.10_14 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : str = 10
_lowerCamelCase : str = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
_lowerCamelCase : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
assert sample.dtype == torch.floataa
def A_ ( self , **lowercase ):
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
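# With num_inference_steps set to the full num_train_timesteps, the schedule
# should contain no duplicate entries, which is what the unique() count verifies.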
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 630 |
'''simple docstring'''
A__: Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def UpperCamelCase ( _lowerCAmelCase : int = 1000000, _lowerCAmelCase : int = 10 ) -> int:
_UpperCAmelCase : defaultdict = defaultdict(_UpperCAmelCase )
for outer_width in range(3, (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCAmelCase : Union[str, Any] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ), 1 )
else:
_UpperCAmelCase : List[str] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(_UpperCAmelCase, outer_width - 1, 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
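# Illustrative trace: a square lamina uses outer_width**2 - hole_width**2 tiles,
# and hole_width must share the outer parity so the border is one tile thick all
# round; e.g. outer_width=3, hole_width=1 increments count[9 - 1] == count[8].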
if __name__ == "__main__":
print(F'''{solution() = }''')
| 238 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = 1
snake_case : Dict = 3
snake_case : List[Any] = (32, 32)
snake_case : Dict = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
return image
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : int = UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=SCREAMING_SNAKE_CASE_ ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : str = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
return CLIPTextModel(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Any = self.dummy_cond_unet_upscale
snake_case : Optional[Any] = DDPMScheduler()
snake_case : Optional[Any] = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : Any = self.dummy_vae
snake_case : Any = self.dummy_text_encoder
snake_case : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : Any = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case : int = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case : Tuple = StableDiffusionUpscalePipeline(
unet=SCREAMING_SNAKE_CASE_ ,low_res_scheduler=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,max_noise_level=350 ,)
snake_case : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = """A painting of a squirrel eating a burger"""
snake_case : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
snake_case : Any = sd_pipe(
[prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
snake_case : Optional[int] = output.images
snake_case : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
snake_case : List[Any] = sd_pipe(
[prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=SCREAMING_SNAKE_CASE_ ,)[0]
snake_case : str = image[0, -3:, -3:, -1]
snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1]
snake_case : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case : int = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Any = self.dummy_cond_unet_upscale
snake_case : List[str] = DDPMScheduler()
snake_case : Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : str = self.dummy_vae
snake_case : List[str] = self.dummy_text_encoder
snake_case : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : Optional[int] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case : Tuple = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case : str = StableDiffusionUpscalePipeline(
unet=SCREAMING_SNAKE_CASE_ ,low_res_scheduler=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,max_noise_level=350 ,)
snake_case : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = """A painting of a squirrel eating a burger"""
snake_case : List[Any] = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
snake_case : Any = output.images
assert image.shape[0] == 2
snake_case : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
snake_case : Dict = sd_pipe(
[prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
snake_case : Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = self.dummy_cond_unet_upscale
snake_case : Dict = DDPMScheduler()
snake_case : Any = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : str = self.dummy_vae
snake_case : Dict = self.dummy_text_encoder
snake_case : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case : Optional[int] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
snake_case : Any = unet.half()
snake_case : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case : str = StableDiffusionUpscalePipeline(
unet=SCREAMING_SNAKE_CASE_ ,low_res_scheduler=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ ,vae=SCREAMING_SNAKE_CASE_ ,text_encoder=SCREAMING_SNAKE_CASE_ ,tokenizer=SCREAMING_SNAKE_CASE_ ,max_noise_level=350 ,)
snake_case : Optional[int] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Any = """A painting of a squirrel eating a burger"""
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : int = sd_pipe(
[prompt] ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=2 ,output_type="""np""" ,).images
snake_case : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
snake_case : str = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
snake_case : str = """a cat sitting on a park bench"""
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : Union[str, Any] = pipe(
prompt=SCREAMING_SNAKE_CASE_ ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,output_type="""np""" ,)
snake_case : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
snake_case : str = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : int = StableDiffusionUpscalePipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ ,torch_dtype=torch.floataa ,)
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
snake_case : List[str] = """a cat sitting on a park bench"""
snake_case : Tuple = torch.manual_seed(0 )
snake_case : List[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE_ ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,output_type="""np""" ,)
snake_case : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case_ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : str = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ ,torch_dtype=torch.floataa ,)
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case : Any = """a cat sitting on a park bench"""
snake_case : Union[str, Any] = torch.manual_seed(0 )
snake_case : List[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE_ ,image=SCREAMING_SNAKE_CASE_ ,generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=5 ,output_type="""np""" ,)
snake_case : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 36 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A__ ( unittest.TestCase ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =parent
_a : List[str] =batch_size
_a : List[str] =seq_length
_a : List[Any] =is_training
_a : Optional[int] =use_attention_mask
_a : List[Any] =use_token_type_ids
_a : List[Any] =use_labels
_a : Optional[Any] =vocab_size
_a : str =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Union[str, Any] =intermediate_size
_a : int =hidden_act
_a : List[str] =hidden_dropout_prob
_a : Optional[int] =attention_probs_dropout_prob
_a : Dict =max_position_embeddings
_a : Any =type_vocab_size
_a : str =type_sequence_label_size
_a : str =initializer_range
_a : List[str] =num_choices
def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict =None
if self.use_attention_mask:
_a : Any =random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[int] =None
if self.use_token_type_ids:
_a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Union[str, Any] =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
_a : Tuple =self.prepare_config_and_inputs()
_a , _a , _a , _a : List[Any] =config_and_inputs
_a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : List[Any] =self.prepare_config_and_inputs()
_a , _a , _a , _a : Optional[int] =config_and_inputs
_a : Tuple =True
_a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCAmelCase ( self :str ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Dict =model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Any ) -> str:
'''simple docstring'''
_a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Dict =model(SCREAMING_SNAKE_CASE )[0]
_a : List[Any] =[1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
_a : Any =np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :int ) -> int:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
_a : str =np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 694 | 0 |
def __magic_name__ ( lowercase , lowercase ) -> int:
"""simple docstring"""
while b:
lowercase_ , lowercase_ : Any = b, a % b
return a
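# Iterative Euclid: each pass replaces (a, b) with (b, a % b) until b is 0;
# e.g. gcd(6, 3): (6, 3) -> (3, 0) -> 3.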
def __magic_name__ ( lowercase , lowercase ) -> int:
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(_UpperCAmelCase , a % b )
def __magic_name__ ( ) -> List[str]:
"""simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 458 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A__: Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]:
return field(default_factory=lambda: default ,metadata=_UpperCAmelCase )
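# Dataclasses reject mutable defaults, so the list-valued benchmark arguments
# below are supplied through a default_factory closure capturing the given list.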
@dataclass
class A__ :
__UpperCamelCase : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__UpperCamelCase : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__UpperCamelCase : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
__UpperCamelCase : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__UpperCamelCase : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
__UpperCamelCase : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
__UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 694 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Union[str, Any] , UpperCamelCase_: int | None = None ):
UpperCamelCase_ =value
UpperCamelCase_ =None # Added in order to delete a node easier
UpperCamelCase_ =None
UpperCamelCase_ =None
def __repr__( self: Dict ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self: Union[str, Any] , UpperCamelCase_: Node | None = None ):
UpperCamelCase_ =root
def __str__( self: Optional[Any] ):
return str(self.root )
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: Node , UpperCamelCase_: Node | None ):
if new_children is not None: # reset its kids
UpperCamelCase_ =node.parent
if node.parent is not None: # reset its parent
if self.is_right(UpperCamelCase_ ): # If it is the right children
UpperCamelCase_ =new_children
else:
UpperCamelCase_ =new_children
else:
UpperCamelCase_ =new_children
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Node ):
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCamelCase__ ( self: Any ):
return self.root is None
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Optional[Any] ):
UpperCamelCase_ =Node(UpperCamelCase_ ) # create a new Node
if self.empty(): # if Tree is empty
UpperCamelCase_ =new_node # set its root
else: # Tree is not empty
UpperCamelCase_ =self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCamelCase_ =new_node # We insert the new node in a leaf
break
else:
UpperCamelCase_ =parent_node.left
else:
if parent_node.right is None:
UpperCamelCase_ =new_node
break
else:
UpperCamelCase_ =parent_node.right
UpperCamelCase_ =parent_node
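# BST invariant: strictly smaller values descend left, everything else descends
# right, so the new node always lands in the first empty slot on its search path.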
def UpperCamelCase__ ( self: Optional[Any] , *UpperCamelCase_: int ):
for value in values:
self.__insert(UpperCamelCase_ )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any ):
if self.empty():
raise IndexError("Warning: Tree is empty! Please use another." )
else:
UpperCamelCase_ =self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCamelCase_ =node.left if value < node.value else node.right
return node
def UpperCamelCase__ ( self: str , UpperCamelCase_: Node | None = None ):
if node is None:
if self.root is None:
return None
UpperCamelCase_ =self.root
if not self.empty():
while node.right is not None:
UpperCamelCase_ =node.right
return node
def UpperCamelCase__ ( self: Tuple , UpperCamelCase_: Node | None = None ):
if node is None:
UpperCamelCase_ =self.root
if self.root is None:
return None
if not self.empty():
UpperCamelCase_ =self.root
while node.left is not None:
UpperCamelCase_ =node.left
return node
def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: int ):
UpperCamelCase_ =self.search(UpperCamelCase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(UpperCamelCase_ , UpperCamelCase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(UpperCamelCase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(UpperCamelCase_ , node.left )
else:
UpperCamelCase_ =self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCamelCase_ =(
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: Node | None ):
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any]=None ):
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: list , UpperCamelCase_: Node | None ):
if node:
self.inorder(UpperCamelCase_ , node.left )
arr.append(node.value )
self.inorder(UpperCamelCase_ , node.right )
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Node ):
UpperCamelCase_ =[]
self.inorder(UpperCamelCase_ , UpperCamelCase_ ) # append all values to list using inorder traversal
return arr[k - 1]
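# An inorder traversal of a BST visits values in ascending order, so the k-th
# smallest element is simply arr[k - 1].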
def _UpperCamelCase ( A ):
UpperCamelCase_ =[]
if curr_node is not None:
UpperCamelCase_ =postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _UpperCamelCase ( ):
UpperCamelCase_ =(8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCamelCase_ =BinarySearchTree()
for i in testlist:
t.insert(_UpperCAmelCase )
# Prints all the elements of the list in order traversal
print(_UpperCAmelCase )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: " , t.get_max().value ) # type: ignore
print("Min Value: " , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_UpperCAmelCase )
print(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 391 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class A__ ( UpperCAmelCase__ ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]:
'''simple docstring'''
_a : int =1.0 if scale is None else scale
_a : Optional[Any] =0.0 if loc is None else loc
super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] )
@property
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class A__ ( nn.Module ):
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Tuple =args_dim
_a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] )
_a : Dict =domain_map
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]:
'''simple docstring'''
_a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj]
return self.domain_map(*SCREAMING_SNAKE_CASE )
class A__ ( nn.Module ):
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int:
'''simple docstring'''
super().__init__()
_a : List[Any] =function
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]:
'''simple docstring'''
return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
class A__ :
__UpperCamelCase : type
__UpperCamelCase : int
__UpperCamelCase : Dict[str, int]
def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None:
'''simple docstring'''
_a : Any =dim
_a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim}
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution:
'''simple docstring'''
_a : str =self._base_distribution(SCREAMING_SNAKE_CASE )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim )
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def __UpperCAmelCase ( self :Any ) -> float:
'''simple docstring'''
return 0.0
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0
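# squareplus(x) = (x + sqrt(x**2 + 4)) / 2: a smooth, softplus-like map onto the
# positive reals, used below to keep scale/df/total_count parameters positive.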
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
__UpperCamelCase : type = StudentT
@classmethod
def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]:
'''simple docstring'''
_a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
_a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1}
__UpperCamelCase : type = Normal
@classmethod
def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict:
'''simple docstring'''
_a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1}
__UpperCamelCase : type = NegativeBinomial
@classmethod
def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]:
'''simple docstring'''
_a : int =cls.squareplus(SCREAMING_SNAKE_CASE )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution:
'''simple docstring'''
_a , _a : Any =distr_args
if self.dim == 1:
return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution:
'''simple docstring'''
_a , _a : Optional[int] =distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 694 | 0 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
A_ : List[str] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
A_ : Tuple = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
A_ : Optional[int] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : str="binary" , __UpperCAmelCase : Optional[Any]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = fa_score(
__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase , pos_label=__UpperCAmelCase , average=__UpperCAmelCase , sample_weight=__UpperCAmelCase )
return {"f1": float(__UpperCAmelCase ) if score.size == 1 else score}
| 196 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number | (1 << position)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number & ~(1 << position)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number ^ (1 << position)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool:
return ((number >> position) & 1) == 1
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return int((number & (1 << position)) != 0 )
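# Worked examples: 0b1101 | (1 << 1) == 0b1111 (set), 0b1111 & ~(1 << 1) ==
# 0b1101 (clear), 0b1101 ^ (1 << 1) == 0b1111 (flip), (0b1010 >> 3) & 1 == 1 (test).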
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
def a ( A__ , A__ ) -> str:
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
SCREAMING_SNAKE_CASE__ : List[str] = str(bin(_UpperCAmelCase ) )
binary_number += "0" * shift_amount
return binary_number
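# e.g. with number=1, shift_amount=1: str(bin(1)) is '0b1', and appending one
# '0' yields '0b10' -- a logical left shift simply pads zeros on the right.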
def a ( A__ , A__ ) -> str:
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
SCREAMING_SNAKE_CASE__ : int = str(bin(_UpperCAmelCase ) )[2:]
if shift_amount >= len(_UpperCAmelCase ):
return "0b0"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = binary_number[: len(_UpperCAmelCase ) - shift_amount]
return "0b" + shifted_binary_number
def a ( A__ , A__ ) -> str:
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
SCREAMING_SNAKE_CASE__ : List[Any] = """0""" + str(bin(_UpperCAmelCase ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
SCREAMING_SNAKE_CASE__ : Any = len(bin(_UpperCAmelCase )[3:] ) # Find 2's complement of number
SCREAMING_SNAKE_CASE__ : int = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
SCREAMING_SNAKE_CASE__ : Any = (
"""1""" + """0""" * (binary_number_length - len(_UpperCAmelCase )) + binary_number
)
if shift_amount >= len(_UpperCAmelCase ):
return "0b" + binary_number[0] * len(_UpperCAmelCase )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(_UpperCAmelCase ) - shift_amount]
)
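# e.g. number=-8, shift_amount=2: the 5-bit two's-complement string '11000' is
# shifted to '0b11110', i.e. -2 -- the sign bit is replicated on the left.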
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float:
_validate_point(_UpperCAmelCase )
_validate_point(_UpperCAmelCase )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) )
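# zip pairs up coordinates axis by axis; e.g. points [1, 1] and [9, 2] give
# |1 - 9| + |1 - 2| = 9.0.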
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[float] ) -> None:
if point:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for item in point:
if not isinstance(_UpperCAmelCase ,(int, float) ):
_a : str =(
"""Expected a list of numbers as input, found """
F"{type(_UpperCAmelCase ).__name__}"
)
raise TypeError(_UpperCAmelCase )
else:
_a : List[Any] =F"Expected a list of numbers as input, found {type(_UpperCAmelCase ).__name__}"
raise TypeError(_UpperCAmelCase )
else:
raise ValueError("""Missing an input""" )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float:
_validate_point(_UpperCAmelCase )
_validate_point(_UpperCAmelCase )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 |
'''simple docstring'''
from __future__ import annotations
class A__ :
def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
_a : int =order
# a_{0} ... a_{k}
_a : Optional[Any] =[1.0] + [0.0] * order
# b_{0} ... b_{k}
_a : Tuple =[1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_a : List[Any] =[0.0] * self.order
# y[n-1] ... y[n-k]
_a : Tuple =[0.0] * self.order
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < self.order:
_a : int =[1.0, *a_coeffs]
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : int =(
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : Optional[Any] =(
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
_a : List[str] =a_coeffs
_a : Union[str, Any] =b_coeffs
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float:
'''simple docstring'''
_a : str =0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_a : str =self.input_history[:-1]
_a : Optional[Any] =self.output_history[:-1]
_a : Optional[int] =sample
_a : Tuple =result
return result
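# Direct-form I difference equation:
#   y[n] = (b[0]*x[n] + sum_i(b[i]*x[n-i] - a[i]*y[n-i])) / a[0]
# with the input/output histories shifted by one sample and the current
# sample/result stored at index 0.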
| 694 | 0 |
"""simple docstring"""
from __future__ import annotations
__A = []
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> bool:
for i in range(len(_UpperCAmelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(_UpperCAmelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
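# A square is safe when its full row, full column and both upward diagonals hold
# no queen; downward diagonals need no check since rows are filled top to bottom.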
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
if row >= len(_UpperCAmelCase ):
solution.append(_UpperCAmelCase )
printboard(_UpperCAmelCase )
print()
return True
for i in range(len(_UpperCAmelCase ) ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = 1
solve(_UpperCAmelCase , row + 1 )
lowercase__: int = 0
return False
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> None:
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(_UpperCAmelCase ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
__A = 8
__A = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 586 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : dict ,_UpperCAmelCase : str ,_UpperCAmelCase : set ,_UpperCAmelCase : set ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ,_UpperCAmelCase : PriorityQueue ,_UpperCAmelCase : dict ,_UpperCAmelCase : float | int ,) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_a : Dict =cst_fwd.get(_UpperCAmelCase ,np.inf )
_a : int =cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_a : Tuple =new_cost_f
_a : Optional[Any] =v
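# Meeting point: if this neighbour was already settled by the opposite search,
# the two half-paths can be stitched into a full source->destination candidate.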
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_a : str =cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ) -> int:
_a : Optional[Any] =-1
_a : List[str] =set()
_a : Optional[int] =set()
_a : Optional[int] ={source: 0}
_a : List[str] ={destination: 0}
_a : Union[str, Any] ={source: None}
_a : Dict ={destination: None}
_a : PriorityQueue[Any] =PriorityQueue()
_a : PriorityQueue[Any] =PriorityQueue()
_a : Optional[int] =np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_a , _a : str =queue_forward.get()
visited_forward.add(_UpperCAmelCase )
_a , _a : List[Any] =queue_backward.get()
visited_backward.add(_UpperCAmelCase )
_a : int =pass_and_relaxation(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
_a : Any =pass_and_relaxation(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_a : Any =shortest_distance
return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
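# Worked example (added): in the maps above the best E -> F route is
# E -> G -> F with total weight 2 + 1 = 3 (E -> B -> C -> D -> F costs 4).
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3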
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''') | 337 |
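# Small follow-up sketch (added): isolating the tax portion of the total.
def tax_amount(price: float, tax_rate: float) -> float:
    return price_plus_tax(price, tax_rate) - price


assert tax_amount(100, 0.25) == 25.0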
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num factorial."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 694 | 0 |
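# Worked example (added): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27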
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON config
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 98 |
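# Usage sketch (added): the script can also be driven programmatically.
# The paths below are placeholders, not real files:
#
#   convert_tf_checkpoint_to_pytorch(
#       tf_checkpoint_path="t5_small/model.ckpt",
#       config_file="t5_small/config.json",
#       pytorch_dump_path="t5_small_pytorch",
#   )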
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index at which pattern occurs in s."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 0 |
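# Complexity note (added): the scan above is O(len(s) * len(pattern)) in the
# worst case, and unlike str.find it reports every (overlapping) match:
assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]
assert "AAAA".find("AA") == 0  # the built-in only returns the first hit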
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups."""

    # Current minimum recommendation is 2048 bit (group 14).
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # Check that the other party's public key is valid (NIST SP800-56).
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # Check that the other party's public key is valid (NIST SP800-56).
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod() | 630 |
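# Usage sketch (added): two parties derive the same shared secret. This runs
# real 2048-bit modular exponentiation, so it takes a moment.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
alice_shared = alice.generate_shared_key(bob.generate_public_key())
bob_shared = bob.generate_shared_key(alice.generate_public_key())
assert alice_shared == bob_shared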
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
_a : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) )
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
_a : Dict =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) )
else:
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : List[Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Any =os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict()
_a : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict()
_a : Union[str, Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 694 | 0 |
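# Background sketch (added): ImageGPT builds `input_ids` by mapping each
# normalised RGB pixel to its nearest colour cluster, so a 32x32 image
# becomes 1024 token ids. A minimal NumPy version of that lookup, with toy
# cluster values:
import numpy as np


def nearest_cluster_ids(pixels, clusters):
    # pixels: (n, 3) in [-1, 1]; clusters: (k, 3); returns (n,) indices
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)


toy_clusters = np.array([[0.9, 0.7, 0.4], [-0.6, 0.0, 0.5]])
toy_pixels = np.array([[1.0, 0.8, 0.3], [-0.5, -0.1, 0.6]])
assert nearest_cluster_ids(toy_pixels, toy_clusters).tolist() == [0, 1]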
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
'''simple docstring'''
_UpperCAmelCase : Dict = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : List[str] = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase : List[str] = torch.manual_seed(0 )
_UpperCAmelCase : Dict = self.dummy_model()
_UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase : Dict = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(_A , _A )
_UpperCAmelCase : Optional[int] = model(_A , _A )
_UpperCAmelCase : Dict = scheduler.step(_A , _A , _A , generator=_A )
_UpperCAmelCase : Any = output.prev_sample
_UpperCAmelCase : Dict = torch.sum(torch.abs(_A ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def test_full_loop_with_v_prediction(self):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase : Tuple = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : Dict = self.dummy_model()
_UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase : List[Any] = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Dict = scheduler.scale_model_input(_A , _A )
_UpperCAmelCase : Optional[Any] = model(_A , _A )
_UpperCAmelCase : Tuple = scheduler.step(_A , _A , _A , generator=_A )
_UpperCAmelCase : Dict = output.prev_sample
_UpperCAmelCase : Tuple = torch.sum(torch.abs(_A ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
    def test_full_loop_device(self):
'''simple docstring'''
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : List[str] = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCAmelCase : Any = torch.manual_seed(0 )
_UpperCAmelCase : str = self.dummy_model()
_UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase : List[str] = sample.to(_A )
for t in scheduler.timesteps:
_UpperCAmelCase : List[str] = scheduler.scale_model_input(_A , _A )
_UpperCAmelCase : Any = model(_A , _A )
_UpperCAmelCase : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A )
_UpperCAmelCase : List[Any] = output.prev_sample
_UpperCAmelCase : int = torch.sum(torch.abs(_A ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
'''simple docstring'''
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Union[str, Any] = self.get_scheduler_config()
_UpperCAmelCase : Union[str, Any] = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase : Dict = sample.to(_A )
for t in scheduler.timesteps:
_UpperCAmelCase : List[Any] = scheduler.scale_model_input(_A , _A )
_UpperCAmelCase : Tuple = model(_A , _A )
_UpperCAmelCase : List[str] = scheduler.step(_A , _A , _A , generator=_A )
_UpperCAmelCase : Optional[Any] = output.prev_sample
_UpperCAmelCase : List[str] = torch.sum(torch.abs(_A ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 238 |
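# Reference pattern (added): the denoising loop the tests above exercise,
# reduced to its skeleton; `model` stands in for any noise-prediction net.
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample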
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of arr sums exactly to required_sum."""
    arr_len = len(arr)

    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
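# Worked example (added): with arr = [3, 34, 4, 12, 5, 2] the target 9 is
# reachable (4 + 5, or 3 + 4 + 2) while 30 is not.
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)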
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    """Configuration class wrapping an encoder config and a decoder config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate an encoder-decoder config from an encoder and a decoder config."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 36 |
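# Usage sketch (added): composing a config from two sub-configs. The model
# name is illustrative and would be downloaded from the Hub:
#
#   from transformers import AutoConfig
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("bert-base-uncased")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention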
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 0 |
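# Worked example (added): the even Fibonacci numbers up to 100 are 2, 8 and
# 34, so their sum is 44.
assert solution(100) == 44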
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 458 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 694 | 0 |
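# Worked example (added): F(12) = 144 is the first Fibonacci number with
# three digits (under this implementation's indexing).
assert solution(3) == 12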
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
'''simple docstring'''
def __init__( self: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Union[str, Any]=32 , UpperCamelCase_: int=3 , UpperCamelCase_: Optional[Any]=10 , UpperCamelCase_: List[Any]=[10, 20, 30, 40] , UpperCamelCase_: Optional[int]=[1, 1, 2, 1] , UpperCamelCase_: Dict=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]="relu" , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: List[str]=None , ):
UpperCamelCase_ =parent
UpperCamelCase_ =batch_size
UpperCamelCase_ =image_size
UpperCamelCase_ =num_channels
UpperCamelCase_ =embeddings_size
UpperCamelCase_ =hidden_sizes
UpperCamelCase_ =depths
UpperCamelCase_ =is_training
UpperCamelCase_ =use_labels
UpperCamelCase_ =hidden_act
UpperCamelCase_ =num_labels
UpperCamelCase_ =scope
UpperCamelCase_ =len(UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ =self.get_config()
return config, pixel_values
def UpperCamelCase__ ( self: Dict ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: int ):
UpperCamelCase_ =FlaxRegNetModel(config=UpperCamelCase_ )
UpperCamelCase_ =model(UpperCamelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ):
UpperCamelCase_ =self.num_labels
UpperCamelCase_ =FlaxRegNetForImageClassification(config=UpperCamelCase_ )
UpperCamelCase_ =model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self: List[str] ):
UpperCamelCase_ =self.prepare_config_and_inputs()
UpperCamelCase_ =config_and_inputs
UpperCamelCase_ ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =FlaxRegNetModelTester(self )
UpperCamelCase_ =ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def UpperCamelCase__ ( self: Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self: Optional[int] ):
return
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCamelCase__ ( self: str ):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCamelCase__ ( self: Union[str, Any] ):
pass
def UpperCamelCase__ ( self: Dict ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ =model_class(UpperCamelCase_ )
UpperCamelCase_ =inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ =[*signature.parameters.keys()]
UpperCamelCase_ =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def UpperCamelCase__ ( self: int ):
def check_hidden_states_output(UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str ):
UpperCamelCase_ =model_class(UpperCamelCase_ )
UpperCamelCase_ =model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase_ =self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ =True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ =True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def UpperCamelCase__ ( self: Union[str, Any] ):
UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ =self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ =model_class(UpperCamelCase_ )
@jax.jit
def model_jitted(UpperCamelCase_: Dict , **UpperCamelCase_: str ):
return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_ )
with self.subTest("JIT Enabled" ):
UpperCamelCase_ =model_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCamelCase_ =model_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 391 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 694 | 0 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__UpperCAmelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__UpperCAmelCase ) , 0 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
# Check that tokenizer_type ≠ model_type
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__UpperCAmelCase , """vocab.txt""" ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""bert""" , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__UpperCAmelCase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__UpperCAmelCase , """merges.txt""" ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""gpt2""" , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__UpperCAmelCase , """vocab.txt""" ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""bert""" )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__UpperCAmelCase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__UpperCAmelCase , """merges.txt""" ) )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type="""gpt2""" )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
with pytest.raises(__UpperCAmelCase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __UpperCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__UpperCAmelCase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = TOKENIZER_MAPPING.values()
SCREAMING_SNAKE_CASE__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__UpperCAmelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__UpperCAmelCase ) , __UpperCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __UpperCAmelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """Hello, world. How are you?"""
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertEqual("""[UNK]""" , tokens[0] )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = get_tokenizer_config("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ = config.pop("""_commit_hash""" , __UpperCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__UpperCAmelCase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__UpperCAmelCase )
self.assertDictEqual(__UpperCAmelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = get_tokenizer_config(__UpperCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
try:
AutoConfig.register("""custom""" , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizer.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
try:
AutoConfig.register("""custom""" , __UpperCAmelCase )
# Can register in two steps
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = BertTokenizerFast.from_pretrained(__UpperCAmelCase )
bert_tokenizer.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = CustomTokenizerFast.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register("""custom""" , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
with self.assertRaisesRegex(
__UpperCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
with self.assertRaisesRegex(
__UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(__UpperCAmelCase , revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
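# Illustrative sketch of the registration pattern exercised above (the repo id
# mirrors the test fixture; this snippet is not part of the original test file):
# AutoConfig.register("custom", CustomConfig)
# AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer, fast_tokenizer_class=NewTokenizerFast)
# tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False)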
| 196 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
    def __init__( self :Dict , vocab_file :Any , monolingual_vocab_file :Tuple , bos_token :Any="<s>" , eos_token :Union[str, Any]="</s>" , sep_token :int="</s>" , cls_token :Union[str, Any]="<s>" , unk_token :Tuple="<unk>" , pad_token :Optional[Any]="<pad>" , mask_token :List[str]="<mask>" , sp_model_kwargs :Optional[Dict[str, Any]] = None , **kwargs :List[Any] , ) -> None:
        '''simple docstring'''
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , """r""" , encoding="""utf-8""" ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
def rank_of_matrix( matrix ) -> int:
    '''simple docstring'''
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row to re-check it after the swap / column move
            row -= 1
        row += 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
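    # Quick sanity check for rank_of_matrix (values illustrative): the third
    # row below is the sum of the first two, so the expected rank is 2.
    example = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [5.0, 7.0, 9.0]]
    assert rank_of_matrix(example) == 2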
| 35 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
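# These re-exports exist so downstream code can import the helpers from one
# place, e.g. `from accelerate.utils import set_seed, send_to_device`.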
| 694 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = '''▁'''
lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowercase = {
'''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
lowercase = {
'''facebook/xglm-564M''': 2_0_4_8,
}
class __A( UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
    def __init__( self : Optional[Any] , vocab_file : Optional[int] , bos_token : Any="<s>" , eos_token : int="</s>" , sep_token : Optional[int]="</s>" , cls_token : Optional[Any]="<s>" , unk_token : Dict="<unk>" , pad_token : Union[str, Any]="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Dict , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get("""additional_special_tokens""" , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase_ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase_ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowerCamelCase_ = len(self.sp_model )
lowerCamelCase_ = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ):
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
lowerCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , __UpperCamelCase : Optional[Any] ):
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCamelCase_ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowercase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def lowercase__ ( self : Tuple , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
lowerCamelCase_ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowercase__ ( self : List[str] ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : int , __UpperCamelCase : str ):
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowercase__ ( self : int , __UpperCamelCase : Optional[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self : Any , __UpperCamelCase : int ):
lowerCamelCase_ = """""".join(__UpperCamelCase ).replace(__UpperCamelCase , """ """ ).strip()
return out_string
def lowercase__ ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
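# Sketch of the fairseq/spm offset documented in the alignment table above
# (variable and method names here are illustrative): spm assigns "," id 3,
# fairseq reserves ids 0-3 for <s>/<pad>/</s>/<unk>, so token-to-id conversion
# returns 3 + fairseq_offset == 4.
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tok._convert_token_to_id(",")  # -> 4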
| 272 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]:
_a : Dict =current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
_a : Any =row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
_a : Any =column
continue
_a : Union[str, Any] =column / magnitude
# Subtract to cancel term
_a : Optional[Any] =current_set[0]
_a : List[Any] =[first_row]
_a : Tuple =current_set[1::]
for row in current_set:
_a : Any =[]
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a : List[str] =final_set[0]
_a : Tuple =[]
_a : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a : str =simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,_UpperCAmelCase )
_a : List[Any] =resultant
return final_set
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list:
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_a : str =len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_a : str =equations.copy()
if any(0 in row for row in data_set ):
_a : Optional[int] =data_set.copy()
_a : str =[]
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
_a : List[Any] =data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 ,_UpperCAmelCase )
_a : Dict =data_set.copy()
_a : Any =simplify(_UpperCAmelCase )
_a : Any =simplified[::-1]
_a : list =[]
for row in simplified:
_a : Optional[Any] =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
_a : List[str] =temp_row[1::]
_a : int =temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
_a : Tuple =[]
for item in solutions:
final.append(float(round(_UpperCAmelCase ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
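    # Expected results for the calls below: the 5x5 system solves to
    # [-1.0, 0.0, 1.0, 2.0, 3.0] (the five variables sum to 5, so each
    # x_i = rhs_i - 5), and [[4, 2]] reduces to 4x = 2, i.e. [0.5].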
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 694 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ) -> None:
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
assert parsed_imports == ["os"]
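# Behaviour under test: get_imports skips imports wrapped in try/except (the
# optional `bar`/`baz` dependencies above), so every case parses to ["os"].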
| 586 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
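# Illustrative usage of the MarkupLM-specific fields stored above: they size
# the xpath embeddings, defaulting to max_depth=50 and xpath_unit_hidden_size=32,
# e.g. MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32).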
| 694 | 0 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self , UpperCamelCase__ = 1_28 , UpperCamelCase__ = 2_56 , UpperCamelCase__ = 20_00.0 , UpperCamelCase__ = 7_68 , UpperCamelCase__ = 12 , UpperCamelCase__ = 12 , UpperCamelCase__ = 64 , UpperCamelCase__ = 20_48 , UpperCamelCase__ = 0.1 , ):
'''simple docstring'''
super().__init__()
A__ = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A__ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A__ = False
A__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A__ = nn.Dropout(p=UpperCamelCase__ )
A__ = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A__ = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A__ = TaLayerNorm(UpperCamelCase__ )
A__ = nn.Dropout(p=UpperCamelCase__ )
A__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A__ = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A__ = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A__ = self.position_encoding(UpperCamelCase__ )
A__ = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A__ = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A__ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A__ = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A__ = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A__ = self.decoder_norm(UpperCamelCase__ )
A__ = self.post_dropout(UpperCamelCase__ )
A__ = self.spec_out(UpperCamelCase__ )
return spec_out
class lowerCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1e-6 ):
'''simple docstring'''
super().__init__()
A__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ):
'''simple docstring'''
A__ = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A__ = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
A__ = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A__ = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class lowerCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
super().__init__()
A__ = TaLayerNorm(UpperCamelCase__ )
A__ = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A__ = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A__ = nn.Dropout(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , ):
'''simple docstring'''
A__ = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A__ = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A__ = self.attention(UpperCamelCase__ )
A__ = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
super().__init__()
A__ = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A__ = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A__ = nn.Dropout(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , ):
'''simple docstring'''
A__ = self.layer_norm(UpperCamelCase__ )
A__ = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A__ = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class lowerCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
super().__init__()
A__ = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A__ = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A__ = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A__ = nn.Dropout(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
A__ = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A__ = self.film(UpperCamelCase__ , UpperCamelCase__ )
A__ = self.DenseReluDense(UpperCamelCase__ )
A__ = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
super().__init__()
A__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A__ = nn.Dropout(UpperCamelCase__ )
A__ = NewGELUActivation()
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.act(self.wi_a(UpperCamelCase__ ) )
A__ = self.wi_a(UpperCamelCase__ )
A__ = hidden_gelu * hidden_linear
A__ = self.dropout(UpperCamelCase__ )
A__ = self.wo(UpperCamelCase__ )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=1e-6 ):
'''simple docstring'''
super().__init__()
A__ = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A__ = eps
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowerCAmelCase__ ( nn.Module ):
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class lowerCAmelCase__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
super().__init__()
A__ = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.scale_bias(UpperCamelCase__ )
A__ = torch.chunk(UpperCamelCase__ , 2 , -1 )
A__ = x * (1 + scale) + shift
        return x
| 337 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]:
if config_path is not None:
_a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase )
else:
_a : str =SpeechTaHifiGanConfig()
_a : Tuple =SpeechTaHifiGan(_UpperCAmelCase )
_a : int =torch.load(_UpperCAmelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =np.load(_UpperCAmelCase )
_a : Union[str, Any] =stats[0].reshape(-1 )
_a : Any =stats[1].reshape(-1 )
_a : Tuple =torch.from_numpy(_UpperCAmelCase ).float()
_a : List[str] =torch.from_numpy(_UpperCAmelCase ).float()
model.save_pretrained(_UpperCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__: Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
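# Example invocation (the script and file names below are illustrative):
# python convert_hifigan_checkpoint.py \
#     --checkpoint_path generator.ckpt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan \
#     --push_to_hub username/speecht5_hifigan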
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __lowerCAmelCase :
"""simple docstring"""
    def __init__( self , num_of_nodes : int ) -> None:
        '''simple docstring'''
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ) -> None:
        '''simple docstring'''
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ) -> int:
        '''simple docstring'''
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ) -> None:
        '''simple docstring'''
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list[int] , u_node : int , v_node : int ) -> None:
        '''simple docstring'''
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        '''simple docstring'''
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a__ ( ) -> None:
"""simple docstring"""
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
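    # Usage sketch for the class above (edge weights illustrative): for a
    # triangle with weights 5, 1 and 2, Boruvka keeps the two cheapest edges,
    # so the printed MST weight is 3.
    # g = __lowerCAmelCase(3)
    # g.add_edge(0, 1, 5); g.add_edge(1, 2, 1); g.add_edge(0, 2, 2)
    # g.boruvka()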
| 98 |
'''simple docstring'''
class A__ :
    def __init__( self :List[Any] , graph :Dict , sources :Any , sinks :List[str] ) -> List[str]:
        '''simple docstring'''
        _a : List[str] =None
        _a : Optional[Any] =None
        _a : str =graph
        self._normalize_graph(sources , sinks )
        _a : Optional[int] =len(graph )
_a : Union[str, Any] =None
    def __UpperCAmelCase ( self :List[str] , sources :Union[str, Any] , sinks :List[str] ) -> Any:
        '''simple docstring'''
        if isinstance(sources , int ):
            sources =[sources]
        if isinstance(sinks , int ):
            sinks =[sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        _a : Union[str, Any] =sources[0]
        _a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
A__: str = [0]
A__: Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__: List[str] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 694 | 0 |
"""simple docstring"""
def _snake_case ( numerator = 1 , digit = 1000 ):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided : list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
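# With the defaults this is Project Euler 26: the d < 1000 whose unit fraction
# 1/d has the longest recurring cycle, so _snake_case() should return 983.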
# Tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 630 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
'''huggingface/time-series-transformer-tourism-monthly''': (
        '''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _UpperCAmelCase ( UpperCAmelCase__):
__a : List[Any] = "time_series_transformer"
__a : str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , _A = None , _A = None , _A = "student_t" , _A = "nll" , _A = 1 , _A = [1, 2, 3, 4, 5, 6, 7] , _A = "mean" , _A = 0 , _A = 0 , _A = 0 , _A = 0 , _A = None , _A = None , _A = 32 , _A = 32 , _A = 2 , _A = 2 , _A = 2 , _A = 2 , _A = True , _A = "gelu" , _A = 64 , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 1_00 , _A = 0.02 , _A=True , **_A , ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = prediction_length
_UpperCAmelCase : Union[str, Any] = context_length or prediction_length
_UpperCAmelCase : List[str] = distribution_output
_UpperCAmelCase : Tuple = loss
_UpperCAmelCase : List[Any] = input_size
_UpperCAmelCase : Optional[Any] = num_time_features
_UpperCAmelCase : Union[str, Any] = lags_sequence
_UpperCAmelCase : Optional[int] = scaling
_UpperCAmelCase : List[Any] = num_dynamic_real_features
_UpperCAmelCase : List[str] = num_static_real_features
_UpperCAmelCase : str = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_A ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
_UpperCAmelCase : Any = cardinality
else:
_UpperCAmelCase : List[str] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_A ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
_UpperCAmelCase : Union[str, Any] = embedding_dimension
else:
_UpperCAmelCase : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase : List[str] = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase : Any = input_size * len(_A ) + self._number_of_features
_UpperCAmelCase : Union[str, Any] = d_model
_UpperCAmelCase : Union[str, Any] = encoder_attention_heads
_UpperCAmelCase : Dict = decoder_attention_heads
_UpperCAmelCase : Dict = encoder_ffn_dim
_UpperCAmelCase : Optional[Any] = decoder_ffn_dim
_UpperCAmelCase : List[str] = encoder_layers
_UpperCAmelCase : Union[str, Any] = decoder_layers
_UpperCAmelCase : Any = dropout
_UpperCAmelCase : Optional[int] = attention_dropout
_UpperCAmelCase : Optional[int] = activation_dropout
_UpperCAmelCase : Union[str, Any] = encoder_layerdrop
_UpperCAmelCase : Optional[int] = decoder_layerdrop
_UpperCAmelCase : Tuple = activation_function
_UpperCAmelCase : Optional[Any] = init_std
_UpperCAmelCase : List[Any] = use_cache
super().__init__(is_encoder_decoder=_A , **_A )
@property
def __snake_case ( self ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
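# Minimal sketch (attribute name assumed from the assignment logic above):
# the model input width combines the lagged targets with the covariate count
# returned by this property, i.e. input_size * len(lags_sequence) +
# _number_of_features, e.g. via TimeSeriesTransformerConfig(prediction_length=24).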
| 238 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : Union[str, Any] = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
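# With _LazyModule in place, `from <package> import TimeSeriesTransformerModel`
# defers the heavy torch-backed import (and the availability check) until the
# attribute is first accessed.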
| 36 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A__ ( unittest.TestCase ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =parent
_a : List[str] =batch_size
_a : List[str] =seq_length
_a : List[Any] =is_training
_a : Optional[int] =use_attention_mask
_a : List[Any] =use_token_type_ids
_a : List[Any] =use_labels
_a : Optional[Any] =vocab_size
_a : str =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Union[str, Any] =intermediate_size
_a : int =hidden_act
_a : List[str] =hidden_dropout_prob
_a : Optional[int] =attention_probs_dropout_prob
_a : Dict =max_position_embeddings
_a : Any =type_vocab_size
_a : str =type_sequence_label_size
_a : str =initializer_range
_a : List[str] =num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 694 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def __magic_name__ ( lowercase ) -> Tuple:
"""simple docstring"""
lowercase_ : Optional[int] = NestedDataStructure(_UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
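

# Hypothetical usage sketch (not part of the original module): these arguments
# are normally filled from the command line via transformers.HfArgumentParser;
# the flag names below come from the dataclass fields above.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(BenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    print(benchmark_args.to_json_string())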
| 694 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
| 391 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
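

# A short usage sketch (added; not part of the original module): project raw
# network features to Student's t parameters and build a scaled distribution.
# The feature width and batch size below are illustrative assumptions.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=8)
    features = torch.randn(4, 8)  # (batch, features)
    distr_args = projection(features)  # (df, loc, scale), each of shape (4,)
    distribution = output.distribution(distr_args, loc=torch.zeros(4), scale=torch.ones(4))
    print(distribution.sample().shape)  # torch.Size([4])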
| 694 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 196 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
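

# Added usage sketch (not part of the original module); its doctests are
# executed by the ``doctest.testmod()`` call in the main guard below.
def usage_examples() -> None:
    """
    >>> set_bit(0b1101, 1)  # 13 -> 0b1111
    15
    >>> clear_bit(0b1101, 2)  # 13 -> 0b1001
    9
    >>> flip_bit(0b1101, 1)  # 13 -> 0b1111
    15
    >>> is_bit_set(0b1010, 3)
    True
    >>> get_bit(0b1010, 0)
    0
    """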
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 35 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
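

# Added usage sketch (not in the original file); picked up by the
# ``doctest.testmod()`` call below.
def usage_examples() -> None:
    """
    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance_one_liner([1.5, 2], [3, 0])
    3.5
    """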
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 272 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
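

if __name__ == "__main__":
    # Brief usage sketch (added; not part of the original class). The
    # coefficients below are an arbitrary example, not a designed filter:
    # y[n] = 0.5 * x[n] + 0.5 * x[n - 1], a simple first-order smoother.
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
    print([filt.process(x) for x in (1.0, 1.0, 1.0)])  # [0.5, 1.0, 1.0]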
| 694 | 0 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
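# The early-exit rule above can be illustrated without any framework: PABEE
# stops once `patience` consecutive per-layer classifier heads agree. This is
# a minimal added sketch (function name and test values are illustrative,
# not part of the original module):
def pabee_exit_layer(per_layer_predictions, patience):
    """Return the 1-based layer index at which inference would stop."""
    patient_counter = 0
    previous = None
    for layer_idx, prediction in enumerate(per_layer_predictions, start=1):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            return layer_idx
    return len(per_layer_predictions)  # no early exit: all layers are used


# Heads disagree for three layers, then agree twice in a row -> exit at layer 5.
assert pabee_exit_layer([2, 0, 1, 1, 1, 1], patience=2) == 5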
| 586 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
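    # Quick check of bidirectional_dij on the graphs above (added
    # illustration, not in the original): the shortest E -> F path is
    # E -> G -> F with total weight 2 + 1 = 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3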
| 694 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 337 |
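# Added illustration for the CpmTokenizer above (standalone, not part of the
# original file): spaces and newlines survive SentencePiece because the
# translator maps them to \u2582/\u2583 on the way in, and _decode maps them back.
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "hello world\n".translate(translator)
decoded = encoded.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "hello world\n"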
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits of num! (Project Euler 20)."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
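    # Added sanity check (not in the original): 10! = 3628800, whose digits
    # sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27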
| 694 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 98 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
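# Added note: the print above emits [4, 10, 18], the three start indices at
# which "ABC" occurs in "ABAAABCDBBABCDDEBCABC".
assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]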
| 694 | 0 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download('wordnet' )
        if NLTK_VERSION >= version.Version('3.6.5' ):
            nltk.download('punkt' )
        if NLTK_VERSION >= version.Version('3.6.6' ):
            nltk.download('omw-1.4' )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 630 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
@unittest.skip("""ImageGPT requires clusters at initialization""" )
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
@slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 694 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Converts a 3d point to a 2d drawable point."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a point around a certain axis by a certain angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F'''{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
    print(F'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
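    # Added reference values for the two prints above:
    # convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = (7.6923076923076925, 15.384615384615385)
    # rotate(1.0, 2.0, 3.0, "y", 90.0) = (3.130524675073759, 2.0, 0.4470070007889556)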
| 238 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
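    # Added usage examples (not in the original): 5 cannot be formed from
    # {2, 4, 6, 8}, but 14 can (2 + 4 + 8).
    assert is_sum_subset([2, 4, 6, 8], 5) is False
    assert is_sum_subset([2, 4, 6, 8], 14) is True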
| 694 | 0 |
def solution() -> int:
    '''Counts the Sundays that fell on the first of the month, 1901-2000.'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
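    # Added sanity note: the established answer to Project Euler 19 is 171.
    assert solution() == 171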
| 36 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
"""simple docstring"""
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=snake_case__, numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__, np.ndarray )
# Test not batched input
lowercase_ : List[str] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
), )
# Test batched
lowercase_ : str = image_processing(snake_case__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 458 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
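    # Added sanity note (Project Euler 25): the first Fibonacci term with
    # 1000 digits is term 4782, so solution(1000) returns 4782.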
| 694 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
UpperCamelCase_ =ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCamelCase_ ="""resnet101"""
if "dc5" in model_name:
UpperCamelCase_ =True
UpperCamelCase_ ="""panoptic""" in model_name
if is_panoptic:
UpperCamelCase_ =250
else:
UpperCamelCase_ =91
UpperCamelCase_ ="""huggingface/label-files"""
UpperCamelCase_ ="""coco-detection-id2label.json"""
UpperCamelCase_ =json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
UpperCamelCase_ ={int(_UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCamelCase_ =idalabel
UpperCamelCase_ ={v: k for k, v in idalabel.items()}
# load image processor
UpperCamelCase_ ="""coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCamelCase_ =ConditionalDetrImageProcessor(format=_UpperCAmelCase )
# prepare image
UpperCamelCase_ =prepare_img()
UpperCamelCase_ =image_processor(images=_UpperCAmelCase , return_tensors="pt" )
UpperCamelCase_ =encoding["""pixel_values"""]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
UpperCamelCase_ =torch.hub.load("DeppMeng/ConditionalDETR" , _UpperCAmelCase , pretrained=_UpperCAmelCase ).eval()
UpperCamelCase_ =conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCamelCase_ ="""conditional_detr.""" + src
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ =rename_backbone_keys(_UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_UpperCAmelCase , is_panoptic=_UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase_ ="""conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCamelCase_ =state_dict.pop(_UpperCAmelCase )
UpperCamelCase_ =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCamelCase_ =state_dict.pop(_UpperCAmelCase )
UpperCamelCase_ =val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCamelCase_ =state_dict.pop(_UpperCAmelCase )
UpperCamelCase_ =val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCamelCase_ =state_dict.pop(_UpperCAmelCase )
UpperCamelCase_ =val
# finally, create HuggingFace model and load state dict
UpperCamelCase_ =ConditionalDetrForSegmentation(_UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
model.push_to_hub(repo_id=_UpperCAmelCase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
UpperCamelCase_ =conditional_detr(_UpperCAmelCase )
UpperCamelCase_ =model(_UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
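# Hypothetical invocation of the conversion script above (the script file
# name and output path below are placeholders, not from the original):
# python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#     --model_name conditional_detr_resnet50 \
#     --pytorch_dump_folder_path ./conditional_detr_resnet50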
| 391 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
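# Hypothetical invocation of the script above (file name and paths are
# placeholders, not from the original):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert_checkpoint \
#     --rembert_config_file ./rembert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin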
| 694 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
SCREAMING_SNAKE_CASE__ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
SCREAMING_SNAKE_CASE__ = features.copy()
SCREAMING_SNAKE_CASE__ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , split=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if issubclass(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = jsonl_path
elif issubclass(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = [jsonl_path]
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
def A ( snake_case__ , snake_case__ , snake_case__=("train",) ):
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ = JsonDatasetReader({"""train""": jsonl_path} , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if split:
SCREAMING_SNAKE_CASE__ = {split: jsonl_path}
else:
SCREAMING_SNAKE_CASE__ = """train"""
SCREAMING_SNAKE_CASE__ = {"""train""": jsonl_path, """test""": jsonl_path}
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( snake_case__ ):
'''simple docstring'''
return json.load(_UpperCAmelCase )
def A ( snake_case__ ):
'''simple docstring'''
return [json.loads(_UpperCAmelCase ) for line in buffer]
class lowerCamelCase :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] ) -> Tuple:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ = load_json_function(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert isinstance(exported_content[0] , __UpperCAmelCase )
assert len(__UpperCAmelCase ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase , orient=__UpperCAmelCase ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ = load_json(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCAmelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(__UpperCAmelCase ) == 1_0
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] ) -> Tuple:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ = load_json_function(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert isinstance(exported_content[0] , __UpperCAmelCase )
assert len(__UpperCAmelCase ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , lines=__UpperCAmelCase , orient=__UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ = load_json(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCAmelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(__UpperCAmelCase ) == 1_0
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : Optional[int] ) -> List[str]:
with pytest.raises(__UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / F"""test.json.{extension}"""
SCREAMING_SNAKE_CASE__ = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__UpperCAmelCase , __UpperCAmelCase , compression=__UpperCAmelCase ).write()
with fsspec.open(__UpperCAmelCase , """rb""" , compression="""infer""" ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
with fsspec.open(__UpperCAmelCase , """rb""" , compression="""infer""" ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert exported_content == original_content
| 196 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
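        # Drop the unpicklable SentencePiece processor before pickling and keep
        # its serialized proto so __setstate__ can rebuild it.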
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
a_ :int = None
a_ :List[Any] = logging.get_logger(__name__)
a_ :Tuple = '''▁'''
a_ :Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ :List[str] = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
a_ :Tuple = {
'''google/pegasus-xsum''': 5_12,
}
class lowercase ( UpperCAmelCase__ ):
lowerCamelCase : List[str] = VOCAB_FILES_NAMES
lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[Any] = PegasusTokenizer
lowerCamelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : int , _lowercase : Optional[int]=None , _lowercase : List[str]=None , _lowercase : Union[str, Any]="<pad>" , _lowercase : List[Any]="</s>" , _lowercase : List[Any]="<unk>" , _lowercase : Dict="<mask_2>" , _lowercase : Any="<mask_1>" , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=1_03 , **_lowercase : Dict , ):
SCREAMING_SNAKE_CASE__ : List[str] = offset
if additional_special_tokens is not None:
if not isinstance(_lowercase , _lowercase ):
raise TypeError(
f"""additional_special_tokens should be of type {type(_lowercase )}, but is"""
f""" {type(_lowercase )}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(_lowercase ) , self.offset - 1 )
]
if len(set(_lowercase ) ) != len(_lowercase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE__ : int = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
_lowercase , tokenizer_file=_lowercase , pad_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , mask_token=_lowercase , mask_token_sent=_lowercase , offset=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Dict = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = False if not self.vocab_file else True
def lowercase__ ( self : Tuple , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self : str , _lowercase : List , _lowercase : Optional[List] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(_lowercase )
elif token_ids_a is None:
return self._special_token_mask(_lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self : int , _lowercase : int , _lowercase : Optional[Any]=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : int , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : str = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 35 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowercase = logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : str=None ) -> Union[str, Any]:
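    # dataclasses helper: supply a list default through default_factory, since
    # mutable defaults cannot be passed to `field` directly.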
return field(default_factory=lambda: default , metadata=_UpperCAmelCase )
@dataclass
class __A:
SCREAMING_SNAKE_CASE = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
SCREAMING_SNAKE_CASE = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
SCREAMING_SNAKE_CASE = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase__ , metadata={'''help''': '''Benchmark training of model'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase__ , metadata={'''help''': '''Verbose memory tracing'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase__ , metadata={'''help''': '''Trace memory line by line'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase__ , metadata={'''help''': '''Save result to a CSV file'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase__ , metadata={'''help''': '''Save all print statements in a log file'''} )
SCREAMING_SNAKE_CASE = field(default=UpperCAmelCase__ , metadata={'''help''': '''Whether to print environment information'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
SCREAMING_SNAKE_CASE = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
SCREAMING_SNAKE_CASE = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
SCREAMING_SNAKE_CASE = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
SCREAMING_SNAKE_CASE = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
SCREAMING_SNAKE_CASE = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
SCREAMING_SNAKE_CASE = field(
default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
SCREAMING_SNAKE_CASE = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
SCREAMING_SNAKE_CASE = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def lowercase__ ( self : Union[str, Any] ):
warnings.warn(
F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , __UpperCamelCase , )
def lowercase__ ( self : Optional[int] ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def lowercase__ ( self : Optional[int] ):
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def lowercase__ ( self : Optional[Any] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 272 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]:
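    # Elimination step: scale each row by its leading coefficient, subtract the
    # first row to cancel the first column, then recurse on the reduced
    # sub-system until rows are three entries wide.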
_a : Dict =current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
_a : Any =row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
_a : Any =column
continue
_a : Union[str, Any] =column / magnitude
# Subtract to cancel term
_a : Optional[Any] =current_set[0]
_a : List[Any] =[first_row]
_a : Tuple =current_set[1::]
for row in current_set:
_a : Any =[]
        # If first term is 0, it is already in the form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a : List[str] =final_set[0]
_a : Tuple =[]
_a : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a : str =simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,_UpperCAmelCase )
_a : List[Any] =resultant
return final_set
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list:
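    # Validate the system (n equations of length n + 1), move an equation with
    # no zero coefficients to the front, run the elimination helper above, then
    # back-substitute through the simplified rows to recover each unknown.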
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_a : str =len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_a : str =equations.copy()
if any(0 in row for row in data_set ):
_a : Optional[int] =data_set.copy()
_a : str =[]
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
_a : List[Any] =data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 ,_UpperCAmelCase )
_a : Dict =data_set.copy()
_a : Any =simplify(_UpperCAmelCase )
_a : Any =simplified[::-1]
_a : list =[]
for row in simplified:
_a : Optional[Any] =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
_a : List[str] =temp_row[1::]
_a : int =temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
_a : Tuple =[]
for item in solutions:
final.append(float(round(_UpperCAmelCase ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A__: int = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 694 | 0 |
"""simple docstring"""
__A = 2_5_6
# Modulus to hash a string
__A = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
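    # Rabin-Karp: compare a rolling hash of each text window of pattern length
    # against the pattern's hash, confirming real equality on a hash match to
    # rule out collisions.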
lowercase__: List[str] = len(_UpperCAmelCase )
lowercase__: int = len(_UpperCAmelCase )
if p_len > t_len:
return False
lowercase__: Any = 0
lowercase__: Any = 0
lowercase__: str = 1
# Calculating the hash of pattern and substring of text
for i in range(_UpperCAmelCase ):
lowercase__: int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowercase__: Tuple = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowercase__: Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowercase__: Union[str, Any] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def SCREAMING_SNAKE_CASE__ ( ) -> None:
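    # Test 1)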
lowercase__: str = """abc1abc12"""
lowercase__: Optional[Any] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowercase__: Optional[int] = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase ) and not rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 2)
lowercase__: Any = """ABABX"""
lowercase__: int = """ABABZABABYABABX"""
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 3)
lowercase__: Optional[int] = """AAAB"""
lowercase__: int = """ABAAAAAB"""
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 4)
lowercase__: int = """abcdabcy"""
lowercase__: Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 5)
lowercase__: List[str] = """Lü"""
lowercase__: List[str] = """Lüsai"""
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = """Lue"""
assert not rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 586 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
| 694 | 0 |
"""simple docstring"""
__UpperCAmelCase ='''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def __a ( ) -> None:
'''simple docstring'''
A__ = input("Enter message: " )
A__ = input("Enter key [alphanumeric]: " )
A__ = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
A__ = """encrypt"""
A__ = encrypt_message(_UpperCAmelCase , _UpperCAmelCase )
elif mode.lower().startswith("d" ):
A__ = """decrypt"""
A__ = decrypt_message(_UpperCAmelCase , _UpperCAmelCase )
print(f"""\n{mode.title()}ed message:""" )
print(_UpperCAmelCase )
def __a ( A , A ) -> str:
'''simple docstring'''
return translate_message(_UpperCAmelCase , _UpperCAmelCase , "encrypt" )
def __a ( A , A ) -> str:
'''simple docstring'''
return translate_message(_UpperCAmelCase , _UpperCAmelCase , "decrypt" )
def __a ( A , A , A ) -> str:
'''simple docstring'''
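    # Shift each letter of the message by the matching key letter (adding for
    # "encrypt", subtracting for "decrypt"); characters outside LETTERS pass
    # through unchanged and do not advance the key index.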
A__ = []
A__ = 0
A__ = key.upper()
for symbol in message:
A__ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_UpperCAmelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_UpperCAmelCase ):
A__ = 0
else:
translated.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
main() | 337 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
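    # Copy the weight-norm parameters (weight_g / weight_v) and biases from the
    # original HiFi-GAN checkpoint into the HF model: input conv, each
    # upsampling layer, every residual block, then the output conv.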
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]:
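    # Build the config, load the original generator weights, attach the dataset
    # mean/scale statistics used to de-normalize log-mel inputs, then save the
    # converted model (and optionally push it to the hub).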
if config_path is not None:
_a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase )
else:
_a : str =SpeechTaHifiGanConfig()
_a : Tuple =SpeechTaHifiGan(_UpperCAmelCase )
_a : int =torch.load(_UpperCAmelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =np.load(_UpperCAmelCase )
_a : Union[str, Any] =stats[0].reshape(-1 )
_a : Any =stats[1].reshape(-1 )
_a : Tuple =torch.from_numpy(_UpperCAmelCase ).float()
_a : List[str] =torch.from_numpy(_UpperCAmelCase ).float()
model.save_pretrained(_UpperCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__: Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 694 | 0 |
'''simple docstring'''
from math import factorial
def a__ ( lowercase : int = 20 ) -> int:
"""simple docstring"""
_UpperCamelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
    _lowerCamelCase = _UpperCamelCase // 2
    return int(factorial(_UpperCamelCase ) / (factorial(_lowerCamelCase ) * factorial(_UpperCamelCase - _lowerCamelCase )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowercase__ : Optional[int] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 98 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
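        # Accept a single index or a list for sources/sinks; when several are
        # given, add a super-source / super-sink with enough capacity to feed
        # the whole network.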
        if isinstance(sources , int ):
            _a : Tuple =[sources]
        if isinstance(sinks , int ):
            _a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
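        # Discharge: while the vertex holds excess, push to any lower neighbour
        # with residual capacity, then relabel so further pushes become possible.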
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
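        # Push: move as much excess as the residual capacity allows from
        # `from_index` to `to_index`, updating preflow and excess bookkeeping.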
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
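        # Relabel: lift this vertex's height to one above its lowest neighbour
        # that still has residual capacity, enabling a future push.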
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
A__: str = [0]
A__: Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__: List[str] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 694 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# Initialise PyTorch model
_lowerCamelCase : List[str] = RemBertConfig.from_json_file(_UpperCAmelCase )
print('Building PyTorch model from configuration: {}'.format(str(_UpperCAmelCase ) ) )
_lowerCamelCase : Dict = RemBertModel(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print('Save PyTorch model to {}'.format(_UpperCAmelCase ) )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 630 |
'''simple docstring'''
A__: Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ : Tuple = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
lowerCamelCase__ : Optional[int] = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
lowerCamelCase__ : int = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 238 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return `price` with `tax_rate` applied, e.g. price_plus_tax(100, 0.25) == 125.0."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 0 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
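

if __name__ == "__main__":
    # Minimal usage sketch (illustrative): this input stream is what backs
    # `datasets.Dataset.from_generator` under the hood.
    def gen():
        yield {"text": "hello"}
        yield {"text": "world"}

    ds = GeneratorDatasetInputStream(gen).read()
    print(ds)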
| 36 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 694 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 458 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
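
# `list_field` is a small helper so that list-valued dataclass fields can take a
# default: dataclasses forbid plain mutable defaults, so the list is wrapped in a
# `default_factory` closure.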
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 694 | 0 |
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 391 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])
    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function) -> None:
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        # Shape of each individual event produced by the distribution.
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        # Number of event dimensions, i.e. length of the `event_shape` tuple.
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        # A value with valid numeric log-loss under the corresponding distribution.
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        # Projection layer mapping inputs to the distribution's parameters.
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        # Converts arguments to the right shape and domain; implemented by subclasses.
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
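

# Note: squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps any real input smoothly onto
# the positive reals (similar to softplus); the output classes below rely on it in
# their `domain_map`s to keep scale / df / total_count parameters positive.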
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
@classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
@classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
@classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 694 | 0 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            # Store the latest prediction so back_propagation sees it.
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input matrix: eight 3-bit binary vectors.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 196 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
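
    # A couple of quick illustrative calls (values chosen arbitrarily):
    print(set_bit(0b1101, 1))  # 15 (0b1111)
    print(clear_bit(0b1111, 1))  # 13 (0b1101)
    print(get_bit(0b1101, 1))  # 0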
| 694 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('''GET''', '''https://huggingface.co''')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('''GET''', '''https://huggingface.co''', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('''GET''', '''https://huggingface.co''')


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('''https://huggingface.co''')
| 35 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers of the same length and returns the
    Manhattan (taxicab) distance between them.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as `manhattan_distance`, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
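
    # Quick illustrative checks in addition to the doctests:
    print(manhattan_distance([1, 1], [2, 2]))  # 2.0
    print(manhattan_distance_one_liner([1.5, 2], [3, 4]))  # 3.5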
| 694 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __A:
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]=9_9 , __UpperCamelCase : Optional[int]=1_3 , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : str=9 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : str=3_7 , __UpperCamelCase : Optional[int]=8 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Union[str, Any]=0.002 , __UpperCamelCase : List[Any]=1 , __UpperCamelCase : str=0 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = encoder_seq_length
lowerCamelCase_ = decoder_seq_length
# For common tests
lowerCamelCase_ = self.decoder_seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = d_ff
lowerCamelCase_ = relative_attention_num_buckets
lowerCamelCase_ = dropout_rate
lowerCamelCase_ = initializer_factor
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = decoder_start_token_id
lowerCamelCase_ = None
lowerCamelCase_ = decoder_layers
def lowercase__ ( self : List[Any] ):
return TaConfig.from_pretrained("""google/umt5-base""" )
def lowercase__ ( self : Dict , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , __UpperCamelCase : int=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[Any]=None , ):
if attention_mask is None:
lowerCamelCase_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase_ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
lowerCamelCase_ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
lowerCamelCase_ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase_ = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = config.num_attention_heads
lowerCamelCase_ = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self : str ):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase__ ( self : Union[str, Any] ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase__ ( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , ):
lowerCamelCase_ = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
lowerCamelCase_ = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
lowerCamelCase_ = result.last_hidden_state
lowerCamelCase_ = result.past_key_values
lowerCamelCase_ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fpaa_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class __A( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE = [0.8, 0.9]
def lowercase__ ( self : Any ):
lowerCamelCase_ = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCamelCase_ = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def lowercase__ ( self : Optional[int] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __A( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
lowerCamelCase_ = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
lowerCamelCase_ = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowerCamelCase_ = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
lowerCamelCase_ = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = model.generate(input_ids.to(__UpperCamelCase ) )
lowerCamelCase_ = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowerCamelCase_ = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 272 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
| 694 | 0 |
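A quick sanity check for the filter class above. The coefficients below are illustrative (a hypothetical first-order exponential-smoothing filter), not values from the original file:

# Illustrative only: y[n] = 0.5 * x[n] + 0.5 * y[n-1], a simple smoother.
filt = IIRFilter(1)
filt.set_coefficients(a_coeffs=[1.0, -0.5], b_coeffs=[0.5, 0.0])
print([round(filt.process(s), 4) for s in [1.0, 1.0, 1.0, 1.0]])
# [0.5, 0.75, 0.875, 0.9375] -- converges towards the input level 1.0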
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
__A = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]:
lowercase__: List[str] = git.Repo(search_parent_directories=_UpperCAmelCase )
lowercase__: Any = {
"""repo_id""": str(_UpperCAmelCase ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(_UpperCAmelCase , '''git_log.json''' ) , '''w''' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase , indent=4 )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]:
if params.n_gpu <= 0:
lowercase__: Optional[int] = 0
lowercase__: List[Any] = -1
lowercase__: Optional[Any] = True
lowercase__: Union[str, Any] = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase__: List[str] = int(os.environ['''WORLD_SIZE'''] )
lowercase__: List[Any] = int(os.environ['''N_GPU_NODE'''] )
lowercase__: List[str] = int(os.environ['''RANK'''] )
# number of nodes / node ID
lowercase__: Union[str, Any] = params.world_size // params.n_gpu_per_node
lowercase__: List[str] = params.global_rank // params.n_gpu_per_node
lowercase__: Optional[int] = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase__: List[str] = 1
lowercase__: Optional[int] = 0
lowercase__: Any = 0
lowercase__: List[Any] = 0
lowercase__: str = 1
lowercase__: List[str] = 1
lowercase__: int = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase__: int = params.node_id == 0 and params.local_rank == 0
lowercase__: Tuple = params.n_nodes > 1
# summary
lowercase__: str = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 586 |
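The multi-GPU branch of init_gpu_params above reads its topology entirely from environment variables; the values below are illustrative, showing one process of a hypothetical 2-node, 4-GPU-per-node job (global rank 5 lives on node 1):

import os

# Illustrative values only; real launchers (torchrun, SLURM scripts) set these.
os.environ.update(
    {
        "WORLD_SIZE": "8",   # total number of processes
        "N_GPU_NODE": "4",   # GPUs per node
        "RANK": "5",         # global rank of this process
        "N_NODES": "2",      # must equal world_size // n_gpu_per_node
        "NODE_RANK": "1",    # must equal global_rank // n_gpu_per_node
    }
)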
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
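With the sample graphs above, the meeting-in-the-middle search can be exercised directly. E reaches F either via B-C-D (cost 4) or via G (cost 3), so the bidirectional search should return 3:

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3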
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt",
        "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt",
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 337 |
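Funnel's one quirk relative to BERT-style tokenizers is visible above: the leading <cls> token gets token type 2 rather than 0. An illustrative check (this sketch assumes network access to the pretrained files):

# Sketch only: the first token type id is 2 for the <cls> position.
tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
enc = tok("hello", "world")
print(enc["token_type_ids"])  # [2, 0, ..., 0, 1, ..., 1]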
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 694 | 0 |
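A worked check of the digit sum above: 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27:

assert solution(10) == 27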
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (the third passed as 0),
    compute the missing quantity of the electrical system."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 |
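For example, a 2 V source driving 4 A dissipates 8 W; passing power=0 asks the function for it:

print(electric_power(voltage=2, current=4, power=0))  # result(name='power', value=8.0)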
def naive_pattern_search(s: str, pattern: str) -> list:
    """Slide the pattern over the text and record every index where all characters match."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 0 |
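The search above is O(len(s) * len(pattern)) in the worst case, and because the window advances one character at a time it reports overlapping matches too:

# Overlapping occurrences are all reported: "AAA" contains "AA" at indices 0 and 1.
assert naive_pattern_search("AAA", "AA") == [0, 1]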
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 630 |
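The module above defers heavy imports until an attribute is first touched. A minimal sketch of the same idea, using only the standard library (all names here are illustrative, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch: resolve exported symbols to their submodules on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)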
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 694 | 0 |
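ImageGPT's preprocessing maps each normalized pixel to its nearest color cluster, which is why the processor emits integer input_ids rather than pixel tensors. A minimal sketch of that nearest-cluster step, reusing the two toy clusters from the tester above (the function name is illustrative):

import numpy as np

clusters = np.asarray(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)

def color_quantize(pixels: np.ndarray) -> np.ndarray:
    """Assign each (N, 3) pixel row to the index of its nearest cluster (squared L2)."""
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(-1)

print(color_quantize(np.array([[0.9, 0.6, 0.4], [-0.5, 0.0, 0.5]])))  # [0 1]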
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( UpperCAmelCase__):
def __init__( self , *_A , **_A ) -> None:
'''simple docstring'''
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , _A , )
super().__init__(*_A , **_A )
| 238 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Dynamic programming: subset[i][j] is True if some subset of arr[:i] sums to j."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
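A quick check of the table-filling above, with the classic example set {3, 34, 4, 12, 5, 2}: a subset sums to 9 (4 + 5), but nothing reaches exactly 30:

arr = [3, 34, 4, 12, 5, 2]
assert is_sum_subset(arr, 9)
assert not is_sum_subset(arr, 30)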
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither entirely non-decreasing nor non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 36 |
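Concrete cases for the bouncy test above: an increasing number and a decreasing number are both non-bouncy, anything that goes up and then down (or vice versa) is bouncy:

assert not check_bouncy(134468)  # digits never decrease
assert not check_bouncy(66420)   # digits never increase
assert check_bouncy(155349)      # neither, hence bouncy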
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n (Project Euler problem 2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 0 |
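Only every third Fibonacci number is even (2, 8, 34, 144, ...), and the even terms satisfy E(k) = 4 * E(k-1) + E(k-2), so the list above can be avoided entirely. A constant-memory sketch of that variant:

def solution_constant_memory(n: int = 4_000_000) -> int:
    # Even Fibonacci numbers follow E(k) = 4 * E(k-1) + E(k-2), starting 2, 8.
    total, prev, curr = 0, 2, 8
    if n >= 2:
        total += 2
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total

assert solution_constant_memory() == solution()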
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Invert the color of every pixel in a BGR image."""
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 458 |
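The per-pixel Python loop above works but is slow; since the image is a numpy array, the same inversion vectorizes to a single expression for 8-bit images:

import numpy as np

# Vectorized equivalent on a tiny synthetic 2x2 BGR image.
img = np.array([[[0, 0, 0], [255, 255, 255]],
                [[10, 20, 30], [100, 150, 200]]], dtype=np.uint8)
negative = 255 - img
print(negative[1, 0])  # [245 235 225]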
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (so that fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Project Euler problem 25: index of the first n-digit Fibonacci number."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 694 | 0 |
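Recomputing the whole sequence for every index makes the search above quadratic. Binet's formula gives the digit count of F(k) directly, so the index can also be found in closed form; a sketch under the same indexing convention:

import math

def solution_closed_form(n: int = 1000) -> int:
    # digits(F_k) = floor(k*log10(phi) - log10(sqrt(5))) + 1; solve for the first k with n digits.
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))

assert solution_closed_form(3) == 12  # F_12 = 144 is the first 3-digit Fibonacci number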
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 391 |
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 694 | 0 |
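Invocation of the converter above is a standard argparse CLI; the paths below are placeholders, shown only to illustrate the shape of the command:

# Hypothetical paths, illustrating the CLI:
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/rembert/model.ckpt \
#     --rembert_config_file /path/to/rembert/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin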
"""simple docstring"""
def A ( snake_case__ , snake_case__ = " " ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for index, char in enumerate(_UpperCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
SCREAMING_SNAKE_CASE__ = index + 1
elif index + 1 == len(_UpperCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 196 |
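One behavioral quirk of the splitter above worth noting: a string that ends with the separator drops the trailing empty field, unlike str.split:

assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
assert split("a,b,", ",") == ["a", "b"]        # trailing empty field is dropped...
assert "a,b,".split(",") == ["a", "b", ""]     # ...whereas str.split keeps it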
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
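A usage sketch for the tokenizer above. It assumes the sentencepiece dependency is installed and the vinai/bartpho-syllable files can be fetched; the example sentence is illustrative and the exact pieces depend on the downloaded model:

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))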
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 35 |
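A hedged usage sketch for the ONNX config above, using the legacy transformers.onnx Python API (treat this as a sketch; the model name is the one referenced in the archive map):

from pathlib import Path

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.onnx import export

model_name = "Helsinki-NLP/opus-mt-en-de"
config = AutoConfig.from_pretrained(model_name)
onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian.onnx"))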
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 0 |
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name: str) -> str:
    """Convert a camel-case name to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name: str) -> str:
    """Convert a snake-case name to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
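# Quick usage sketch for the helpers above (expected outputs in comments):
print(camelcase_to_snakecase("SquadV2"))              # squad_v2
print(filename_prefix_for_split("SquadV2", "train"))  # squad_v2-train
print(filepattern_for_dataset_split("SquadV2", "train", "/data", "arrow"))  # /data/squad_v2-train.arrow*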
| 272 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
_a : Dict =current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
_a : Any =row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
_a : Any =column
continue
_a : Union[str, Any] =column / magnitude
# Subtract to cancel term
_a : Optional[Any] =current_set[0]
_a : List[Any] =[first_row]
_a : Tuple =current_set[1::]
for row in current_set:
_a : Any =[]
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a : List[str] =final_set[0]
_a : Tuple =[]
_a : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a : str =simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,_UpperCAmelCase )
_a : List[Any] =resultant
return final_set
def solve_simultaneous(equations: list[list]) -> list:
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_a : str =len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_a : str =equations.copy()
if any(0 in row for row in data_set ):
_a : Optional[int] =data_set.copy()
_a : str =[]
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
_a : List[Any] =data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 ,_UpperCAmelCase )
_a : Dict =data_set.copy()
_a : Any =simplify(_UpperCAmelCase )
_a : Any =simplified[::-1]
_a : list =[]
for row in simplified:
_a : Optional[Any] =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
_a : List[str] =temp_row[1::]
_a : int =temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
_a : Tuple =[]
for item in solutions:
final.append(float(round(_UpperCAmelCase ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
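# Cross-check of the 5x5 system above with NumPy (assumes numpy is installed;
# not part of the original snippet). Each equation reduces to
# x_i + (x_1 + ... + x_5) = rhs_i, so the exact solution is [-1, 0, 1, 2, 3],
# which the solver above is intended to reproduce.
import numpy as np

a = np.array([row[:-1] for row in eq], dtype=float)
b = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(a, b))  # [-1.  0.  1.  2.  3.]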
| 694 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class UpperCAmelCase (UpperCAmelCase__ ):
"""simple docstring"""
_UpperCAmelCase :int = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1408 , _UpperCAmelCase=6144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-1_0 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: int = hidden_size
lowercase__: List[Any] = intermediate_size
lowercase__: List[str] = num_hidden_layers
lowercase__: List[str] = num_attention_heads
lowercase__: Union[str, Any] = patch_size
lowercase__: int = image_size
lowercase__: Optional[Any] = initializer_range
lowercase__: List[str] = attention_dropout
lowercase__: List[str] = layer_norm_eps
lowercase__: Optional[Any] = hidden_act
lowercase__: Union[str, Any] = qkv_bias
@classmethod
def _snake_case ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
lowercase__: List[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
lowercase__: Dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (UpperCAmelCase__ ):
"""simple docstring"""
_UpperCAmelCase :Dict = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Optional[int] = vocab_size
lowercase__: Union[str, Any] = hidden_size
lowercase__: int = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: Any = hidden_act
lowercase__: List[str] = intermediate_size
lowercase__: int = hidden_dropout_prob
lowercase__: List[Any] = attention_probs_dropout_prob
lowercase__: List[str] = max_position_embeddings
lowercase__: Union[str, Any] = initializer_range
lowercase__: Any = layer_norm_eps
lowercase__: Any = position_embedding_type
lowercase__: List[Any] = cross_attention_frequency
lowercase__: Any = encoder_hidden_size
@classmethod
def _snake_case ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
lowercase__: Dict = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
lowercase__: List[Any] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (UpperCAmelCase__ ):
"""simple docstring"""
_UpperCAmelCase :Dict = "instructblip"
_UpperCAmelCase :Tuple = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
lowercase__: Dict = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
lowercase__: Tuple = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
lowercase__: List[str] = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
lowercase__: Any = InstructBlipVisionConfig(**_UpperCAmelCase )
lowercase__: List[str] = InstructBlipQFormerConfig(**_UpperCAmelCase )
lowercase__: Tuple = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
lowercase__: int = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
lowercase__: Any = self.text_config.tie_word_embeddings
lowercase__: Dict = self.text_config.is_encoder_decoder
lowercase__: int = num_query_tokens
lowercase__: Any = self.vision_config.hidden_size
lowercase__: Optional[int] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowercase__: Tuple = 1.0
lowercase__: Optional[int] = 0.02
@classmethod
def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def _snake_case ( self ):
lowercase__: Any = copy.deepcopy(self.__dict__ )
lowercase__: List[Any] = self.vision_config.to_dict()
lowercase__: Dict = self.qformer_config.to_dict()
lowercase__: List[Any] = self.text_config.to_dict()
lowercase__: int = self.__class__.model_type
return output
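# Hedged usage sketch: the three mangled class names above collide, so this uses
# the upstream identifiers referenced inside __init__ (InstructBlipVisionConfig,
# InstructBlipQFormerConfig, InstructBlipConfig); OPTConfig stands in for the
# language-model config.
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVisionConfig(),
    qformer_config=InstructBlipQFormerConfig(),
    text_config=OPTConfig(),
)
print(config.num_query_tokens)  # 32 by default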
| 586 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
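# Minimal instantiation sketch (randomly initialised weights, no checkpoint);
# MarkupLMModel is assumed to be importable from transformers alongside this config.
from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig(hidden_size=256, num_hidden_layers=4)
model = MarkupLMModel(config)
print(model.config.max_depth)  # 50, the default set above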
| 694 | 0 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=64 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=5_12 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=2 , UpperCamelCase__=2 , UpperCamelCase__=2 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=1 , ):
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = q_groups
A__ = k_groups
A__ = v_groups
A__ = post_attention_groups
A__ = intermediate_groups
A__ = output_groups
def lowercase_ ( self ):
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = SqueezeBertModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , UpperCamelCase__ )
A__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = SqueezeBertForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = SqueezeBertForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.num_labels
A__ = SqueezeBertForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.num_labels
A__ = SqueezeBertForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = self.num_choices
A__ = SqueezeBertForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
(config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowercase__ : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase__ : int = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ : Optional[int] = False
lowercase__ : Optional[int] = True
lowercase__ : str = False
def lowercase_ ( self ):
'''simple docstring'''
A__ = SqueezeBertModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase__ , dim=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCamelCase__ )
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SqueezeBertModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
A__ = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
A__ = model(UpperCamelCase__ )[0]
A__ = torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCamelCase__ )
A__ = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 ) )
| 337 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
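# Example invocation (the script filename and all paths are placeholders):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan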
| 694 | 0 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))
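# Worked example: 10! = 3_628_800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so
# solution(10) returns 27; with the default of 100 the answer is 648.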
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 98 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
if sources is int:
_a : Tuple =[sources]
if sinks is int:
_a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
entrances = [0]
exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
maximum_flow = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
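# For this graph the only augmenting path is 0 -> 1 -> 2 -> 3, limited by the
# capacity-6 edge, so the printed maximum flow should be 6.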
| 694 | 0 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that are an n-digit nth power (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
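# Example: 16807 = 7**5 is a five-digit fifth power and counts towards the
# total; with the defaults above (Project Euler 63) the answer is 49.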
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 630 |
'''simple docstring'''
A__: Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 0 |
"""simple docstring"""
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
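# The naive scan above costs O(len(s) * len(pattern)) in the worst case;
# Knuth-Morris-Pratt or Boyer-Moore reduce this to roughly O(len(s) + len(pattern)).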
| 238 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : str = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowercase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
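# Hedged end-to-end sketch, kept as comments so importing this package init stays
# side-effect free. "microsoft/focalnet-tiny" is assumed to be a published
# checkpoint, and pil_image is a placeholder PIL.Image; any FocalNet checkpoint
# works the same way.
#
#   from transformers import AutoImageProcessor, FocalNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
#   model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
#   inputs = processor(images=pil_image, return_tensors="pt")
#   predicted_label = model(**inputs).logits.argmax(-1).item()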
| 36 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A__ ( unittest.TestCase ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =parent
_a : List[str] =batch_size
_a : List[str] =seq_length
_a : List[Any] =is_training
_a : Optional[int] =use_attention_mask
_a : List[Any] =use_token_type_ids
_a : List[Any] =use_labels
_a : Optional[Any] =vocab_size
_a : str =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Union[str, Any] =intermediate_size
_a : int =hidden_act
_a : List[str] =hidden_dropout_prob
_a : Optional[int] =attention_probs_dropout_prob
_a : Dict =max_position_embeddings
_a : Any =type_vocab_size
_a : str =type_sequence_label_size
_a : str =initializer_range
_a : List[str] =num_choices
def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict =None
if self.use_attention_mask:
_a : Any =random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[int] =None
if self.use_token_type_ids:
_a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Union[str, Any] =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
_a : Tuple =self.prepare_config_and_inputs()
_a , _a , _a , _a : List[Any] =config_and_inputs
_a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : List[Any] =self.prepare_config_and_inputs()
_a , _a , _a , _a : Optional[int] =config_and_inputs
_a : Tuple =True
_a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCAmelCase ( self :str ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Dict =model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Any ) -> str:
'''simple docstring'''
_a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Dict =model(SCREAMING_SNAKE_CASE )[0]
_a : List[Any] =[1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
_a : Any =np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :int ) -> int:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
_a : str =np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
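# Hedged masked-LM inference sketch with the checkpoint used in the slow tests
# above; that the repo also hosts a tokenizer is our assumption.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("andreasmadsen/efficient_mlm_m0.40")
mlm_model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained(
    "andreasmadsen/efficient_mlm_m0.40", from_pt=True
)
inputs = tokenizer("Paris is the <mask> of France.", return_tensors="np")
logits = mlm_model(**inputs).logits  # (1, sequence_length, vocab_size)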
| 694 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
UpperCAmelCase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self, snake_case__, snake_case__=16, snake_case__=13, snake_case__=7, snake_case__=14, snake_case__=10, snake_case__=19, snake_case__=5, snake_case__=4, snake_case__=True, snake_case__=16, snake_case__=2, snake_case__=4, snake_case__=4, snake_case__="gelu", snake_case__=0.1, snake_case__=0.1, snake_case__=[1, 2, 3, 4, 5], snake_case__=25, snake_case__=5, ) -> Tuple:
"""simple docstring"""
lowercase_ : List[str] = d_model
lowercase_ : Optional[int] = parent
lowercase_ : Optional[int] = batch_size
lowercase_ : Tuple = prediction_length
lowercase_ : str = context_length
lowercase_ : int = cardinality
lowercase_ : List[Any] = num_time_features
lowercase_ : Optional[Any] = lags_sequence
lowercase_ : str = embedding_dimension
lowercase_ : Optional[Any] = is_training
lowercase_ : Tuple = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : Tuple = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : List[Any] = context_length
lowercase_ : str = prediction_length + label_length
lowercase_ : Union[str, Any] = label_length
lowercase_ : str = moving_average
lowercase_ : Optional[int] = autocorrelation_factor
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )
def snake_case__ ( self, snake_case__ ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Optional[int] = config.context_length + max(config.lags_sequence )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size, 1], config.cardinality[0] )
lowercase_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowercase_ : List[str] = floats_tensor([self.batch_size, _past_length] )
lowercase_ : List[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowercase_ : List[str] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowercase_ : int = floats_tensor([self.batch_size, config.prediction_length] )
lowercase_ : List[Any] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
lowercase_ : str = self.get_config()
lowercase_ : Dict = self.prepare_autoformer_inputs_dict(snake_case__ )
return config, inputs_dict
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
lowercase_ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self, snake_case__, snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Any = AutoformerModel(config=snake_case__ ).to(snake_case__ ).eval()
lowercase_ : Optional[Any] = model(**snake_case__ )
lowercase_ : int = outputs.encoder_last_hidden_state
lowercase_ : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Any = model.get_encoder()
encoder.save_pretrained(snake_case__ )
lowercase_ : Union[str, Any] = AutoformerEncoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowercase_ : Optional[int] = model.create_network_inputs(**snake_case__ )
lowercase_ : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowercase_ : Optional[int] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
lowercase_ : Optional[Any] = encoder(inputs_embeds=snake_case__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowercase_ : Union[str, Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1 )
.unsqueeze(1 )
.repeat(1, config.prediction_length, 1 )
)
lowercase_ : int = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
lowercase_ : Union[str, Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
), dim=-1, )
lowercase_ : List[str] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
), dim=-1, )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Tuple = model.get_decoder()
decoder.save_pretrained(snake_case__ )
lowercase_ : List[Any] = AutoformerDecoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowercase_ : Dict = decoder(
trend=snake_case__, inputs_embeds=snake_case__, encoder_hidden_states=snake_case__, )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCamelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__a : Optional[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__a : Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__a : Optional[int] = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__a : Optional[int] = False
__a : Dict = False
__a : Optional[int] = False
__a : str = False
__a : Union[str, Any] = False
__a : int = False
def snake_case__ ( self ) -> str:
"""simple docstring"""
lowercase_ : Dict = AutoformerModelTester(self )
lowercase_ : Dict = ConfigTester(self, config_class=snake_case__, has_text_modality=snake_case__ )
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
lowercase_ : Optional[Any] = model_class.from_pretrained(snake_case__, output_loading_info=snake_case__ )
self.assertEqual(info["""missing_keys"""], [] )
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
lowercase_ : Tuple = inspect.signature(getattr(snake_case__, """forward""" ) )
# The main input is the name of the argument after `self`
lowercase_ : List[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name, snake_case__ )
def snake_case__ ( self ) -> Any:
"""simple docstring"""
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : int = model_class(snake_case__ )
lowercase_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : Optional[Any] = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case__ )], snake_case__ )
def snake_case__ ( self ) -> str:
"""simple docstring"""
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Union[str, Any] = True
lowercase_ : Optional[Any] = getattr(self.model_tester, """seq_length""", snake_case__ )
lowercase_ : Tuple = getattr(self.model_tester, """decoder_seq_length""", snake_case__ )
lowercase_ : Optional[Any] = getattr(self.model_tester, """encoder_seq_length""", snake_case__ )
lowercase_ : Union[str, Any] = getattr(self.model_tester, """d_model""", snake_case__ )
lowercase_ : Tuple = getattr(self.model_tester, """num_attention_heads""", snake_case__ )
lowercase_ : List[Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowercase_ : int = True
lowercase_ : Optional[Any] = False
lowercase_ : Any = True
lowercase_ : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase_ : Tuple = model(**self._prepare_for_class(snake_case__, snake_case__ ) )
lowercase_ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : Tuple = True
lowercase_ : Tuple = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(snake_case__, snake_case__ ) )
lowercase_ : Dict = outputs.encoder_attentions
self.assertEqual(len(snake_case__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
lowercase_ : Tuple = len(snake_case__ )
lowercase_ : Optional[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case__, snake_case__ )
# decoder attentions
lowercase_ : Optional[int] = outputs.decoder_attentions
self.assertIsInstance(snake_case__, (list, tuple) )
self.assertEqual(len(snake_case__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
# cross attentions
lowercase_ : Any = outputs.cross_attentions
self.assertIsInstance(snake_case__, (list, tuple) )
self.assertEqual(len(snake_case__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
# Check attention is always last and order is fine
lowercase_ : int = True
lowercase_ : Tuple = True
lowercase_ : Tuple = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase_ : List[str] = model(**self._prepare_for_class(snake_case__, snake_case__ ) )
self.assertEqual(out_len + 2, len(snake_case__ ) )
lowercase_ : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
@is_flaky()
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a pickled test batch of the tourism-monthly dataset from the Hub."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
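# Hedged generation sketch mirroring the integration test below; it relies on
# the public "huggingface/autoformer-tourism-monthly" checkpoint used there.
def _example_generation():
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            future_time_features=batch["future_time_features"],
            past_observed_mask=batch["past_observed_mask"],
        )
    return outputs.sequences.mean(dim=1)  # one point forecast per series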
@require_torch
@slow
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self ):
        """simple docstring"""
        model = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""], past_time_features=batch["""past_time_features"""], past_observed_mask=batch["""past_observed_mask"""], static_categorical_features=batch["""static_categorical_features"""], future_values=batch["""future_values"""], future_time_features=batch["""future_time_features"""], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE ) )
    def test_inference_head( self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""], past_time_features=batch["""past_time_features"""], past_observed_mask=batch["""past_observed_mask"""], static_categorical_features=batch["""static_categorical_features"""], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["""static_categorical_features"""], past_time_features=batch["""past_time_features"""], past_values=batch["""past_values"""], future_time_features=batch["""future_time_features"""], past_observed_mask=batch["""past_observed_mask"""], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape, expected_shape )
        expected_slice = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86], device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1 ) )
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class A__ :
    models : List[str] = list_field(
        default=[] , metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        } , )
    batch_sizes : List[int] = list_field(
        default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
    sequence_lengths : List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
    inference : bool = field(
        default=True , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
    cuda : bool = field(
        default=True , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
    tpu : bool = field(
        default=True , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
    fpaa : bool = field(default=False , metadata={"help": "Use FP16 to accelerate inference."} )
    training : bool = field(default=False , metadata={"help": "Benchmark training of model"} )
    verbose : bool = field(default=False , metadata={"help": "Verbose memory tracing"} )
    speed : bool = field(
        default=True , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
    memory : bool = field(
        default=True , metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        } , )
    trace_memory_line_by_line : bool = field(default=False , metadata={"help": "Trace memory line by line"} )
    save_to_csv : bool = field(default=False , metadata={"help": "Save result to a CSV file"} )
    log_print : bool = field(default=False , metadata={"help": "Save all print statements in a log file"} )
    env_print : bool = field(default=False , metadata={"help": "Whether to print environment information"} )
    multi_process : bool = field(
        default=True , metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        } , )
    inference_time_csv_file : str = field(
        default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
    inference_memory_csv_file : str = field(
        default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
    train_time_csv_file : str = field(
        default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
    train_memory_csv_file : str = field(
        default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
    env_info_csv_file : str = field(
        default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
    log_filename : str = field(
        default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
    repeat : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
    only_pretrain_model : bool = field(
        default=False , metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if it exists, just load the"
                " pretrained model weights."
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """to benchmark Transformer models.""" , FutureWarning , )
    def to_json_string( self ):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self ) , indent=2 )
    @property
    def model_names( self ) -> List[str]:
        '''simple docstring'''
        if len(self.models ) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""" )
        return self.models
    @property
    def do_multi_processing( self ):
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""" )
            return False
        else:
            return True
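# Hypothetical usage sketch (not part of the original file; field names follow the dataclass
# above and the values are illustrative only):
# args = A__(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[128])
# print(args.to_json_string())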
| 694 | 0 |
"""simple docstring"""
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class __lowerCAmelCase ( PipelineTool ):
'''simple docstring'''
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]
    def encode( self , text ):
        return self.pre_processor(text , return_tensors="pt" , truncation=True )
    def forward( self , inputs ):
        return self.model.generate(**inputs )[0]
    def decode( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
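# A minimal usage sketch (assumption: the checkpoint above is reachable locally or on the Hub;
# the tool is callable directly via the PipelineTool base class):
# summarizer = __lowerCAmelCase()
# print(summarizer("Some long English text to condense into a short summary ..."))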
| 391 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution ):
    def __init__( self , base_distribution: Distribution , loc=None , scale=None , event_dim=0 ):
        '''simple docstring'''
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        '''simple docstring'''
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        '''simple docstring'''
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        '''simple docstring'''
        return self.variance.sqrt()
class ParameterProjection( nn.Module ):
    def __init__( self , in_features: int , args_dim: Dict[str, int] , domain_map: Callable[..., Tuple[torch.Tensor]] , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x: torch.Tensor ) -> Tuple[torch.Tensor]:
        '''simple docstring'''
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
    def __init__( self , function ):
        '''simple docstring'''
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        '''simple docstring'''
        return self.function(x , *args )
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim: int = 1 ) -> None:
        '''simple docstring'''
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        '''simple docstring'''
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None , ) -> Distribution:
        '''simple docstring'''
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ) -> Tuple:
        '''simple docstring'''
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        '''simple docstring'''
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        '''simple docstring'''
        return 0.0
    def get_parameter_projection( self , in_features: int ) -> nn.Module:
        '''simple docstring'''
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args: torch.Tensor ):
        '''simple docstring'''
        raise NotImplementedError()
    @staticmethod
    def squareplus( x: torch.Tensor ) -> torch.Tensor:
        '''simple docstring'''
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df: torch.Tensor , loc: torch.Tensor , scale: torch.Tensor ):
        '''simple docstring'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc: torch.Tensor , scale: torch.Tensor ):
        '''simple docstring'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count: torch.Tensor , logits: torch.Tensor ):
        '''simple docstring'''
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ) -> Distribution:
        '''simple docstring'''
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None ) -> Distribution:
        '''simple docstring'''
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
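# End-to-end sketch of the parameterization pattern above (shapes are illustrative,
# not taken from the original file):
# output = StudentTOutput(dim=1)
# proj = output.get_parameter_projection(in_features=32)
# df, loc, scale = proj(torch.randn(8, 32))       # domain-mapped distribution parameters
# distr = output.distribution((df, loc, scale))   # a StudentT over the last feature axis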
| 694 | 0 |
"""simple docstring"""
def solution( length: int = 50 ):
    '''simple docstring'''
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F'{solution() = }')
| 196 |
'''simple docstring'''
def set_bit( number : int , position : int ) -> int:
    return number | (1 << position)
def clear_bit( number : int , position : int ) -> int:
    return number & ~(1 << position)
def flip_bit( number : int , position : int ) -> int:
    return number ^ (1 << position)
def is_bit_set( number : int , position : int ) -> bool:
    return ((number >> position) & 1) == 1
def get_bit( number : int , position : int ) -> int:
    return int((number & (1 << position)) != 0 )
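# A few illustrative calls (results follow directly from the definitions above):
# set_bit(0b1101, 1)    -> 15 (0b1111)
# clear_bit(0b1101, 2)  -> 9  (0b1001)
# flip_bit(0b1101, 1)   -> 15 (0b1111)
# is_bit_set(0b1101, 0) -> True
# get_bit(0b1101, 1)    -> 0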
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
'''simple docstring'''
def manhattan_distance( point_a : list , point_b : list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a ,point_b ) ) )
def _validate_point( point : list[float] ) -> None:
    if point:
        if isinstance(point ,list ):
            for item in point:
                if not isinstance(item ,(int, float) ):
                    _a : str =(
                        """Expected a list of numbers as input, found """
                        f"{type(item ).__name__}"
                    )
                    raise TypeError(_a )
        else:
            _a : List[Any] =f"Expected a list of numbers as input, found {type(point ).__name__}"
            raise TypeError(_a )
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner( point_a : list , point_b : list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a ,point_b ) ) )
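# Example: manhattan_distance([1, 1], [2, 2]) == manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0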
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation( char ):
    cp = ord(char )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("""P""" ):
        return True
    return False
@dataclass
class __A( DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -1_0_0
    return_tensors: str = "pt"
    def torch_call( self , features ):
        import torch
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["""entity_ids"""] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["""ner_tags"""] for feature in features]
        batch["""ner_tags"""] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
        batch["""original_entity_spans"""] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
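# Hypothetical usage sketch (the LUKE checkpoint name is an assumption; `features` must be a
# list of dicts carrying "labels", "ner_tags" and "original_entity_spans"):
# from transformers import AutoTokenizer
# collator = __A(tokenizer=AutoTokenizer.from_pretrained("studio-ousia/luke-base"))
# batch = collator(features)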
| 272 |
'''simple docstring'''
from __future__ import annotations
class A__ :
    def __init__( self , order : int ) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs : list[float] , b_coeffs : list[float] ) -> None:
        '''simple docstring'''
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            _a : int =(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs )}"
            )
            raise ValueError(_a )
        if len(b_coeffs ) != self.order + 1:
            _a : Optional[Any] =(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs )}"
            )
            raise ValueError(_a )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample : float ) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
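# Usage sketch (the coefficients below are illustrative placeholders, not a designed filter):
# filt = A__(2)
# filt.set_coefficients([1.0, -1.1, 0.3], [0.05, 0.1, 0.05])
# filtered = [filt.process(x) for x in (0.0, 1.0, 0.0, -1.0)]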
| 694 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class UpperCAmelCase (PretrainedConfig ):
    """simple docstring"""
    model_type = "dpt"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = {
                    """global_padding""": """same""",
                    """layer_type""": """bottleneck""",
                    """depths""": [3, 4, 9],
                    """out_features""": ["""stage1""", """stage2""", """stage3"""],
                    """embedding_dynamic_padding""": True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 586 |
'''simple docstring'''
from __future__ import annotations
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph : dict ,v : str ,visited_forward : set ,visited_backward : set ,cst_fwd : dict ,cst_bwd : dict ,queue : PriorityQueue ,parent : dict ,shortest_distance : float | int ,) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt ,np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source : str ,destination : str ,graph_forward : dict ,graph_backward : dict ) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward : PriorityQueue[Any] = PriorityQueue()
    queue_backward : PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward ,v_fwd ,visited_forward ,visited_backward ,cst_fwd ,cst_bwd ,queue_forward ,parent_forward ,shortest_distance ,)
        shortest_distance = pass_and_relaxation(
            graph_backward ,v_bwd ,visited_backward ,visited_forward ,cst_bwd ,cst_fwd ,queue_backward ,parent_backward ,shortest_distance ,)
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
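# A quick sanity check (sketch): the shortest E -> F distance in the graphs above is 3,
# following E -> G -> F (2 + 1):
# print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # -> 3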
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ibert"""] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from math import factorial
def solution( n : int = 100 ) -> int:
    return sum(map(int ,str(factorial(n ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 694 | 0 |
'''simple docstring'''
def find_minimum_change( denominations : list[int], value : str ) -> list[int]:
    """simple docstring"""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input('Enter the change you want to make: ').strip()
    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 98 |
'''simple docstring'''
def naive_pattern_search( s : str ,pattern : str ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "poolformer"
    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1e-5 , initializer_range=0.02 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ):
        return 2e-3
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
                    [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """clusters""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
    def test_image_processor_to_json_string( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_image_processor_to_json_file( self ):
        '''simple docstring'''
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """image_processor.json""" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_image_processor_from_and_save_pretrained( self ):
        '''simple docstring'''
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    @unittest.skip("""ImageGPT requires clusters at initialization""" )
    def test_init_without_params( self ):
        '''simple docstring'''
        pass
def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" )
    image1 = Image.open(dataset[4]["""file"""] )
    image2 = Image.open(dataset[5]["""file"""] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest( unittest.TestCase ):
    @slow
    def test_image( self ):
        '''simple docstring'''
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
        expected_ids = [3_0_6, 1_9_1, 1_9_1]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
        expected_ids = [3_0_3, 1_3, 1_3]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 694 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        audio_classifier = AudioClassificationPipeline(model=model , feature_extractor=processor )
        # test with a raw waveform
        audio = np.zeros((3_40_00,) )
        audio2 = np.zeros((1_40_00,) )
        return audio_classifier, [audio2, audio]
    def run_pipeline_test( self , audio_classifier , examples ):
        '''simple docstring'''
        audio2 , audio = examples
        output = audio_classifier(audio )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output , [
                {"""score""": ANY(float ), """label""": ANY(str )},
                {"""score""": ANY(float ), """label""": ANY(str )},
            ] , )
        output = audio_classifier(audio , top_k=1 )
        self.assertEqual(
            output , [
                {"""score""": ANY(float ), """label""": ANY(str )},
            ] , )
        self.run_torchaudio(audio_classifier )
@require_torchaudio
    def run_torchaudio( self , audio_classifier ):
        '''simple docstring'''
        import datasets
        # test with a local file
        dataset = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        audio = dataset[0]["""audio"""]["""array"""]
        output = audio_classifier(audio )
        self.assertEqual(
            output , [
                {"""score""": ANY(float ), """label""": ANY(str )},
                {"""score""": ANY(float ), """label""": ANY(str )},
            ] , )
@require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        model = """anton-l/wav2vec2-random-tiny-classifier"""
        audio_classifier = pipeline("""audio-classification""" , model=model )
        audio = np.ones((80_00,) )
        output = audio_classifier(audio , top_k=4 )
        EXPECTED_OUTPUT = [
            {"""score""": 0.0842, """label""": """no"""},
            {"""score""": 0.0838, """label""": """up"""},
            {"""score""": 0.0837, """label""": """go"""},
            {"""score""": 0.0834, """label""": """right"""},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"""score""": 0.0845, """label""": """stop"""},
            {"""score""": 0.0844, """label""": """on"""},
            {"""score""": 0.0841, """label""": """right"""},
            {"""score""": 0.0834, """label""": """left"""},
        ]
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        audio_dict = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4 )
        self.assertIn(nested_simplify(output , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
    def test_large_model_pt( self ):
        '''simple docstring'''
        import datasets
        model = """superb/wav2vec2-base-superb-ks"""
        audio_classifier = pipeline("""audio-classification""" , model=model )
        dataset = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
        audio = np.array(dataset[3]["""speech"""] , dtype=np.float32 )
        output = audio_classifier(audio , top_k=4 )
        self.assertEqual(
            nested_simplify(output , decimals=3 ) , [
                {"""score""": 0.981, """label""": """go"""},
                {"""score""": 0.007, """label""": """up"""},
                {"""score""": 0.006, """label""": """_unknown_"""},
                {"""score""": 0.001, """label""": """down"""},
            ] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
| 238 |
'''simple docstring'''
def is_subset_sum( arr : list[int] , required_sum : int ) -> bool:
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 ,required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 ,arr_len + 1 ):
        for j in range(1 ,required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
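# Example: is_subset_sum([3, 34, 4, 12, 5, 2], 9) -> True (4 + 5 == 9)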
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
from random import randint, random
def construct_highway( number_of_cells : int , frequency : int , initial_speed : int , random_frequency : bool = False , random_speed : bool = False , max_speed : int = 5 , ) -> list:
    '''simple docstring'''
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance( highway_now : list , car_index : int ) -> int:
    '''simple docstring'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update( highway_now : list , probability : float , max_speed : int ) -> list:
    '''simple docstring'''
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cell before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate( highway : list , number_of_update : int , probability : float , max_speed : int ) -> list:
    '''simple docstring'''
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
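# Small end-to-end run (sketch): a 21-cell highway with a car every 2 cells at speed 1,
# evolved for 5 steps with no random braking:
# simulate(construct_highway(21, 2, 1), 5, 0.0, 5)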
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
'''simple docstring'''
def solution( n : int = 4000000 ) -> int:
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_doc_toc( doc_list ):
    """simple docstring"""
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
                """others.""" )
        # Only add this once
        new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def check_scheduler_doc( overwrite=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["""sections"""]
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["""sections"""] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["""sections"""] = api_doc
            with open(PATH_TO_TOC , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def check_pipeline_doc( overwrite=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding="""utf-8""" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["""sections"""]
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["""sections"""]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["""section"""]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc["""section"""] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["""sections"""] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["""sections"""] = api_doc
            with open(PATH_TO_TOC , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite) | 458 |
'''simple docstring'''
def fibonacci( n : int ) -> int:
    if n == 1 or not isinstance(n ,int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 ,n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n : int ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n : int = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 694 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( key , ciphertext ):
    decoded = ""
    keychar : int
    cipherchar : int
    decodedchar : int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext ):
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(key , ciphertext )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles , common_word ):
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename = "p059_cipher.txt" ):
    ciphertext : list[int]
    possibles : list[str]
    common_word : str
    decoded_text : str
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 391 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path ,rembert_config_file ,pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 694 | 0 |
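The conversion script above follows the standard pattern: build the model from a JSON config, load the TF weights into it, then persist only the state_dict. A toy version of the save/reload half, using a stand-in nn.Linear since the real RemBERT weights are not available here (assumes torch is installed):

import torch

model = torch.nn.Linear(4, 2)  # stand-in for the converted RemBertModel
torch.save(model.state_dict(), "pytorch_model.bin")
restored = torch.nn.Linear(4, 2)
restored.load_state_dict(torch.load("pytorch_model.bin"))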
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 196 |
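The long __init__ above mostly re-exports names, with the optional pieces (DeepSpeed, Megatron-LM, bitsandbytes) pulled in only after an availability check. A generic sketch of that guarded-import pattern; the importlib-based check and the example package name are mine:

import importlib.util

def is_available(package: str) -> bool:
    # True when the package is importable, without actually importing it yet.
    return importlib.util.find_spec(package) is not None

if is_available("numpy"):
    import numpy as np  # only loaded when the optional dependency exists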
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 0 |
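The tokenizer above builds BART-style model inputs: <s> A </s> for a single sequence and <s> A </s></s> B </s> for a pair. The same rule in isolation, with illustrative token ids:

def build_inputs(ids_a, ids_b=None, cls_id=0, sep_id=2):
    # cls_id marks <s>, sep_id marks </s>; a pair gets a double separator.
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert build_inputs([5, 6]) == [0, 5, 6, 2]
assert build_inputs([5], [7]) == [0, 5, 2, 2, 7, 2]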
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowercase ( UpperCAmelCase__ ):
lowerCamelCase : torch.FloatTensor
class lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : str , _lowercase : int = 6_55_36 , _lowercase : Optional[int] = None , _lowercase : int = 2 , _lowercase : int = 2 , _lowercase : int = 0 , _lowercase : str = "fourier" , _lowercase : bool = True , _lowercase : bool = False , _lowercase : float = 0.0 , _lowercase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _lowercase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _lowercase : Tuple[str] = "UNetMidBlock1D" , _lowercase : str = None , _lowercase : Tuple[int] = (32, 32, 64) , _lowercase : str = None , _lowercase : int = 8 , _lowercase : int = 1 , _lowercase : bool = False , ):
super().__init__()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample_size
# time
if time_embedding_type == "fourier":
SCREAMING_SNAKE_CASE__ : Any = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=_lowercase , log=_lowercase , flip_sin_to_cos=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
SCREAMING_SNAKE_CASE__ : str = Timesteps(
block_out_channels[0] , flip_sin_to_cos=_lowercase , downscale_freq_shift=_lowercase )
SCREAMING_SNAKE_CASE__ : int = block_out_channels[0]
if use_timestep_embedding:
SCREAMING_SNAKE_CASE__ : Optional[Any] = block_out_channels[0] * 4
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TimestepEmbedding(
in_channels=_lowercase , time_embed_dim=_lowercase , act_fn=_lowercase , out_dim=block_out_channels[0] , )
SCREAMING_SNAKE_CASE__ : Dict = nn.ModuleList([] )
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : int = nn.ModuleList([] )
SCREAMING_SNAKE_CASE__ : Any = None
# down
SCREAMING_SNAKE_CASE__ : str = in_channels
for i, down_block_type in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = output_channel
SCREAMING_SNAKE_CASE__ : Dict = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
SCREAMING_SNAKE_CASE__ : int = i == len(_lowercase ) - 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_down_block(
_lowercase , num_layers=_lowercase , in_channels=_lowercase , out_channels=_lowercase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(_lowercase )
# mid
SCREAMING_SNAKE_CASE__ : List[Any] = get_mid_block(
_lowercase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=_lowercase , add_downsample=_lowercase , )
# up
SCREAMING_SNAKE_CASE__ : str = list(reversed(_lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = reversed_block_out_channels[0]
if out_block_type is None:
SCREAMING_SNAKE_CASE__ : List[str] = out_channels
else:
SCREAMING_SNAKE_CASE__ : List[str] = block_out_channels[0]
for i, up_block_type in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = output_channel
SCREAMING_SNAKE_CASE__ : Optional[int] = (
reversed_block_out_channels[i + 1] if i < len(_lowercase ) - 1 else final_upsample_channels
)
SCREAMING_SNAKE_CASE__ : Dict = i == len(_lowercase ) - 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_up_block(
_lowercase , num_layers=_lowercase , in_channels=_lowercase , out_channels=_lowercase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = output_channel
# out
SCREAMING_SNAKE_CASE__ : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_out_block(
out_block_type=_lowercase , num_groups_out=_lowercase , embed_dim=block_out_channels[0] , out_channels=_lowercase , act_fn=_lowercase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self : str , _lowercase : torch.FloatTensor , _lowercase : Union[torch.Tensor, float, int] , _lowercase : bool = True , ):
SCREAMING_SNAKE_CASE__ : Any = timestep
if not torch.is_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : str = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(_lowercase ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ : Any = timesteps[None].to(sample.device )
SCREAMING_SNAKE_CASE__ : Any = self.time_proj(_lowercase )
if self.config.use_timestep_embedding:
SCREAMING_SNAKE_CASE__ : Dict = self.time_mlp(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = timestep_embed[..., None]
SCREAMING_SNAKE_CASE__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
SCREAMING_SNAKE_CASE__ : List[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
SCREAMING_SNAKE_CASE__ : Tuple = ()
for downsample_block in self.down_blocks:
SCREAMING_SNAKE_CASE__ : Optional[Any] = downsample_block(hidden_states=_lowercase , temb=_lowercase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.mid_block(_lowercase , _lowercase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
SCREAMING_SNAKE_CASE__ : List[str] = down_block_res_samples[-1:]
SCREAMING_SNAKE_CASE__ : Optional[int] = down_block_res_samples[:-1]
SCREAMING_SNAKE_CASE__ : Optional[int] = upsample_block(_lowercase , res_hidden_states_tuple=_lowercase , temb=_lowercase )
# 5. post-process
if self.out_block:
SCREAMING_SNAKE_CASE__ : List[Any] = self.out_block(_lowercase , _lowercase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_lowercase )
| 35 |
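At the top of forward() above, scalar timesteps are promoted to 1-D tensors before the time projection so the embedding layers always see a batch dimension. That normalization step on its own (assumes torch is installed; the helper name is mine):

import torch

def normalize_timestep(timestep, device):
    # Promote Python scalars and 0-d tensors to a 1-element 1-D tensor.
    if not torch.is_tensor(timestep):
        return torch.tensor([timestep], dtype=torch.long, device=device)
    if timestep.ndim == 0:
        return timestep[None].to(device)
    return timestep

assert normalize_timestep(5, "cpu").shape == (1,)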
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowercase = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def __lowerCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : str ) -> Union[str, Any]:
warnings.warn(_UpperCAmelCase , _UpperCAmelCase )
requires_backends(_UpperCAmelCase , """sklearn""" )
return (preds == labels).mean()
def __lowerCAmelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]:
warnings.warn(_UpperCAmelCase , _UpperCAmelCase )
requires_backends(_UpperCAmelCase , """sklearn""" )
lowerCamelCase_ = simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_ = fa_score(y_true=_UpperCAmelCase , y_pred=_UpperCAmelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple ) -> Union[str, Any]:
warnings.warn(_UpperCAmelCase , _UpperCAmelCase )
requires_backends(_UpperCAmelCase , """sklearn""" )
lowerCamelCase_ = pearsonr(_UpperCAmelCase , _UpperCAmelCase )[0]
lowerCamelCase_ = spearmanr(_UpperCAmelCase , _UpperCAmelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __lowerCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] ) -> List[str]:
warnings.warn(_UpperCAmelCase , _UpperCAmelCase )
requires_backends(_UpperCAmelCase , """sklearn""" )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), F'''Predictions and labels have mismatched lengths {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(_UpperCAmelCase , _UpperCAmelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
elif task_name == "mrpc":
return acc_and_fa(_UpperCAmelCase , _UpperCAmelCase )
elif task_name == "sts-b":
return pearson_and_spearman(_UpperCAmelCase , _UpperCAmelCase )
elif task_name == "qqp":
return acc_and_fa(_UpperCAmelCase , _UpperCAmelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
else:
raise KeyError(_UpperCAmelCase )
def __lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ) -> int:
warnings.warn(_UpperCAmelCase , _UpperCAmelCase )
requires_backends(_UpperCAmelCase , """sklearn""" )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(F'''Predictions and labels have mismatched lengths {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(_UpperCAmelCase , _UpperCAmelCase )}
else:
raise KeyError(_UpperCAmelCase )
| 272 |
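The helpers above delegate to scipy and scikit-learn. Computing the acc-and-F1 combination directly for a tiny prediction set (assumes numpy and scikit-learn are installed):

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})  # acc 0.75, f1 0.8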
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]:
_a : Dict =current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
_a : Any =row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
_a : Any =column
continue
_a : Union[str, Any] =column / magnitude
# Subtract to cancel term
_a : Optional[Any] =current_set[0]
_a : List[Any] =[first_row]
_a : Tuple =current_set[1::]
for row in current_set:
_a : Any =[]
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a : List[str] =final_set[0]
_a : Tuple =[]
_a : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a : str =simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,_UpperCAmelCase )
_a : List[Any] =resultant
return final_set
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list:
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_a : str =len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_a : str =equations.copy()
if any(0 in row for row in data_set ):
_a : Optional[int] =data_set.copy()
_a : str =[]
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
_a : List[Any] =data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 ,_UpperCAmelCase )
_a : Dict =data_set.copy()
_a : Any =simplify(_UpperCAmelCase )
_a : Any =simplified[::-1]
_a : list =[]
for row in simplified:
_a : Optional[Any] =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
_a : List[str] =temp_row[1::]
_a : int =temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
_a : Tuple =[]
for item in solutions:
final.append(float(round(_UpperCAmelCase ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A__: int = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 694 | 0 |
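solve_simultaneous() above performs Gaussian-style elimination in pure Python. Cross-checking the same 5x5 demo system with numpy, where each row is the augmented [A | b] form:

import numpy as np

eq = np.array([
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
], dtype=float)
x = np.linalg.solve(eq[:, :-1], eq[:, -1])
print(np.round(x, 5))  # [-1.  0.  1.  2.  3.], matching the solver above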
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]:
lowercase__: List[Any] = []
lowercase__: Optional[Any] = set({'''(''', '''[''', '''{'''} )
lowercase__: Tuple = set({''')''', ''']''', '''}'''} )
lowercase__: Optional[Any] = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(_UpperCAmelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_UpperCAmelCase ) == 0 or (len(_UpperCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_UpperCAmelCase ) == 0
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
lowercase__: int = input('''Enter sequence of brackets: ''' )
if is_balanced(_UpperCAmelCase ):
print(_UpperCAmelCase , '''is balanced''' )
else:
print(_UpperCAmelCase , '''is not balanced''' )
if __name__ == "__main__":
main()
| 586 |
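The bracket matcher above is the classic stack algorithm: push every opener, pop on every closer, and demand that the popped opener matches. A compact standalone version:

def is_balanced(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert is_balanced("{[()]}") and not is_balanced("([)]")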
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
| 694 | 0 |
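MarkupLMConfig above stores its defaults in __init__ like any PretrainedConfig, so the usual workflow is default construction plus selective overrides (assumes a transformers install that ships MarkupLM):

from transformers import MarkupLMConfig

config = MarkupLMConfig(hidden_size=768, max_depth=50)
print(config.hidden_size, config.max_depth, config.xpath_unit_hidden_size)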
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=30 , UpperCamelCase__=4_00 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=True , UpperCamelCase__=1 / 2_55 , UpperCamelCase__=True , ):
'''simple docstring'''
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def lowercase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=False ):
'''simple docstring'''
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
A__ = image.size
else:
A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["shortest_edge"] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["shortest_edge"] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[0] )[0]
A__ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowercase__ : Any = YolosImageProcessor if is_vision_available() else None
def lowercase_ ( self ):
'''simple docstring'''
A__ = YolosImageProcessingTester(self )
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
A__ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_rescale=UpperCamelCase__ )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase__ , return_tensors="pt" )
A__ = image_processing_a(UpperCamelCase__ , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1e-4 ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
A__ = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify area
A__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
A__ = YolosImageProcessor(format="coco_panoptic" )
A__ = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify area
A__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase__ )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) )
| 337 |
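The expected (800, 1066) tensor shapes in the tests above come from DETR-style shortest-edge resizing: scale so the shortest side reaches shortest_edge, capped so the longest side never exceeds longest_edge. A sketch of that rule which reproduces those numbers (the helper name is mine, and the int() truncation is an assumption):

def shortest_edge_resize(h: int, w: int, shortest: int = 800, longest: int = 1333):
    # Shrink the target if scaling would push the long side past `longest`.
    if shortest / min(h, w) * max(h, w) > longest:
        shortest = int(round(longest * min(h, w) / max(h, w)))
    if h <= w:
        return shortest, int(shortest * w / h)
    return int(shortest * h / w), shortest

assert shortest_edge_resize(480, 640) == (800, 1066)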
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]:
if config_path is not None:
_a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase )
else:
_a : str =SpeechTaHifiGanConfig()
_a : Tuple =SpeechTaHifiGan(_UpperCAmelCase )
_a : int =torch.load(_UpperCAmelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =np.load(_UpperCAmelCase )
_a : Union[str, Any] =stats[0].reshape(-1 )
_a : Any =stats[1].reshape(-1 )
_a : Tuple =torch.from_numpy(_UpperCAmelCase ).float()
_a : List[str] =torch.from_numpy(_UpperCAmelCase ).float()
model.save_pretrained(_UpperCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__: Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 694 | 0 |
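load_weights() above only works while the model is weight-normalized, because the checkpoint stores weight_g/weight_v pairs; that is why the loading is bracketed by apply_weight_norm() and remove_weight_norm(). A toy demonstration of the same split using torch's own utility (assumes torch is installed):

import torch
from torch.nn.utils import weight_norm, remove_weight_norm

conv = weight_norm(torch.nn.Conv1d(1, 1, 3))   # weight -> weight_g, weight_v
print(hasattr(conv, "weight_g"), hasattr(conv, "weight_v"))  # True True
remove_weight_norm(conv)                        # fuses back into one weight
print(hasattr(conv, "weight_g"))                # False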