"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
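# Builds a tiny random LLaMA configuration plus matching inputs for the shared model tests below.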
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids with them
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
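# Wires the tester into the shared transformers test mixins (common modeling, generation, pipelines).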
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("linear",), ("dynamic",)])
def a_ ( self : int , UpperCamelCase_ : Any):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : str = ids_tensor([1, 10] , config.vocab_size)
__UpperCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : Any = LlamaModel(UpperCamelCase_)
original_model.to(UpperCamelCase_)
original_model.eval()
__UpperCAmelCase : Any = original_model(UpperCamelCase_).last_hidden_state
__UpperCAmelCase : Tuple = original_model(UpperCamelCase_).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : str = {"type": scaling_type, "factor": 10.0}
__UpperCAmelCase : Union[str, Any] = LlamaModel(UpperCamelCase_)
scaled_model.to(UpperCamelCase_)
scaled_model.eval()
__UpperCAmelCase : Optional[Any] = scaled_model(UpperCamelCase_).last_hidden_state
__UpperCAmelCase : Tuple = scaled_model(UpperCamelCase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-5))
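# Slow integration tests: compare logits and greedy generations of released checkpoints against recorded values.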
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
@slow
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__UpperCAmelCase : List[str] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto")
__UpperCAmelCase : Optional[Any] = model(torch.tensor([input_ids]))
# Expected mean on dim = -1
__UpperCAmelCase : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
torch.testing.assert_close(out.mean(-1) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : Dict = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1e-5 , rtol=1e-5)
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
@slow
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__UpperCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto")
__UpperCAmelCase : Any = model(torch.tensor(UpperCamelCase_))
# Expected mean on dim = -1
__UpperCAmelCase : int = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
torch.testing.assert_close(out.mean(-1) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1e-5 , rtol=1e-5)
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
@slow
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__UpperCAmelCase : Any = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto")
__UpperCAmelCase : Optional[Any] = model(torch.tensor(UpperCamelCase_))
# Expected mean on dim = -1
__UpperCAmelCase : int = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
torch.testing.assert_close(out.mean(-1) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2)
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : Optional[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
# fmt: on
torch.testing.assert_close(out.mean(-1) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2)
    @unittest.skip(
        "Logits are not exactly the same; once we fix the instabilities, we will update. This will also be a `too_slow` test.")
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip("Model is curently gated")
@slow
def a_ ( self : List[Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__UpperCAmelCase : int = "Simply put, the theory of relativity states that "
__UpperCAmelCase : Union[str, Any] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
__UpperCAmelCase : str = tokenizer.encode(UpperCamelCase_ , return_tensors="pt")
__UpperCAmelCase : int = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=UpperCamelCase_)
# greedy generation outputs
__UpperCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=64 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_)
__UpperCAmelCase : List[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
A = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return a reference model, a prepared (DDP) model, and a dataloader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
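# Builds the GLUE/MRPC validation dataloader used by the distributed-metrics checks below.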
def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")
    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import numpy as np
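# A* path search on a small 2D grid: Cell nodes, a Gridworld, and an astar() driver.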
class Cell:
    """A node in the grid: a position, a parent link, and the A* costs g, h, and f."""
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell):
        return self.position == cell.position
    def showcell(self):
        print(self.position)
class Gridworld:
    """A 2D world stored as a numpy array; cells on the found path can be marked."""
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
        print(self.w)
    def get_neighbours(self, cell):
        """Return the valid 8-connected neighbours of ``cell``."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                neighbour = Cell()
                neighbour.position = (x, y)
                neighbour.parent = cell
                neighbours.append(neighbour)
        return neighbours
def astar(world, start, goal):
    """A* search from ``start`` to ``goal`` on ``world``; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        # expand the open node with the smallest total cost f
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # squared Euclidean distance to the goal as the heuristic
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    # walk parent links back from the goal to reconstruct the path
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
__A : int = Gridworld()
# Start position and goal
__A : List[Any] = Cell()
__A : Optional[int] = (0, 0)
__A : List[Any] = Cell()
__A : Optional[Any] = (4, 4)
print(f'path from {start.position} to {goal.position}')
__A : Optional[int] = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__A : Optional[int] = 1
print(world.w) | 698 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
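# Dummy modules whose forward signatures exercise the argument-ordering logic of ensure_valid_input.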
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel
        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
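# Round-trip tests for CLIPSegProcessor: save/load behaviour and tokenizer/image-processor parity.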
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of random PIL images for the image-processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
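# Flax UNet building blocks: cross-attention down/up blocks, plain down/up blocks, and the mid block.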
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
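# Evaluation flow: load and resample the dataset, run the ASR pipeline, normalize targets, report WER/CER.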
def log_results(result, args):
    """Compute WER/CER for the predictions and log them, optionally with all outputs."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")
            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    """Normalize a target transcription: strip ignored characters and collapse whitespace."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
    '--chunk_length_s', type=float, default=None, help='Chunk length in seconds for chunked long-form inference. Defaults to None (no chunking).'
)
parser.add_argument(
    '--stride_length_s', type=float, default=None, help='Stride of the audio chunks in seconds. Defaults to None.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
"""Feature extraction class for M-CTC-T."""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
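# Legacy M-CTC-T feature extractor: log-mel filter-bank features with optional per-utterance normalization.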
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """simple docstring"""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        """simple docstring"""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """simple docstring"""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
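# Hedged usage sketch (the class name above is reconstructed from the feature
# set; adjust if the original file used a different one). The extractor pads a
# small batch of raw waveforms and returns 80-dim log-mel features per frame:
#
#   import numpy as np
#   extractor = MCTCTFeatureExtractor()
#   waveforms = [np.zeros(16_000, dtype=np.float32), np.zeros(8_000, dtype=np.float32)]
#   batch = extractor(waveforms, sampling_rate=16_000, padding="longest", return_tensors="np")
#   print(batch["input_features"].shape)  # (2, num_frames, 80)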
| 283 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
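# Hedged usage sketch: the fast tokenizer builds single- and paired-sequence
# inputs with [CLS]/[SEP] via the methods above. The checkpoint name comes from
# the pretrained map in this file:
#
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   enc = tokenizer("a question", "a supporting passage", return_token_type_ids=True)
#   # token_type_ids are 0 for the first segment and 1 for the second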
| 391 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    '''simple docstring'''

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
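# To render the scene above one would typically run something like the
# following from the command line (the file name is a placeholder; the scene
# class name matches the reconstructed one above):
#
#   manim -pql this_scene.py Stage1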
| 391 | 1 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('''float division by zero, could not find root''')
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    '''simple docstring'''
    return math.pow(x, 3) - (2 * x) - 5
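# For f(x) = x**3 - 2*x - 5 (Wallis' classic example) the real root is
# approximately 2.0945515, so the call below should print a value close to it.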
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 708 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self) -> None:
        """simple docstring"""
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file: str) -> None:
        """simple docstring"""
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        """simple docstring"""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        """simple docstring"""
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()
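# Note for the demo below: image_data/input.jpg must exist at the resolved path
# and an output_data/ directory must already be present, since stretch() writes
# its result via cv2.imwrite (which returns False instead of raising on error).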
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image() | 596 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """simple docstring"""
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
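# Hedged usage sketch (the pipeline class name above is reconstructed and the
# checkpoint below is illustrative):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)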
| 187 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters plus one start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """simple docstring"""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
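# Hedged sanity check of the defaults above: ImageGPT models a 32x32 image as a
# sequence of 1024 color-cluster tokens, with one extra vocabulary entry for
# the start-of-sequence token:
#
#   config = ImageGPTConfig()
#   assert config.vocab_size == 513
#   assert config.n_positions == 32 * 32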
| 645 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests(unittest.TestCase):
    """simple docstring"""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        """simple docstring"""
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        """simple docstring"""
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class FeatureExamplesTests(TempDirTestCase):
    """simple docstring"""

    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        """simple docstring"""
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
    def test_checkpointing_by_steps(self):
        """simple docstring"""
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        """simple docstring"""
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
    def test_load_states_by_steps(self):
        """simple docstring"""
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
    @slow
    def test_cross_validation(self):
        """simple docstring"""
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        """simple docstring"""
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        """simple docstring"""
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)
    def test_local_sgd(self):
        """simple docstring"""
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
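# These tests are normally collected with pytest; a typical invocation (the
# path is illustrative, depending on where this file lives in the repo) is:
#
#   python -m pytest tests/test_examples.py -k "checkpointing"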
| 720 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        """simple docstring"""

        def __init__(self, tokenizer):
            """simple docstring"""
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            """simple docstring"""
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        """simple docstring"""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        """simple docstring"""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)

            loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
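# Hedged sketch of the pattern under test: an in-graph tokenizer whose output
# feeds a TF model directly, so tokenizer and model can ship as one SavedModel:
#
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   outputs = tf_tokenizer(tf.constant(["hello world"]))  # dict of int64 tensors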
| 519 | 0 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'

_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'

_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
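# Minimal check mirroring the docstring examples above:
#
#   metric = datasets.load_metric("f1")
#   result = metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
#   assert result["f1"] == 0.5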
| 605 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 605 | 1 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'

_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    '''simple docstring'''

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100} | 302 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """simple docstring"""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    '''simple docstring'''

    def __init__(self):
        """simple docstring"""
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}""")
    @staticmethod
    def register(config_class, feature_extractor_class):
        """simple docstring"""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
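# Usage sketch (editor's addition; the checkpoint id is illustrative and requires
# network access to the Hugging Face Hub or a local copy of the files):
#
#   from transformers import AutoFeatureExtractor
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = extractor([0.0] * 16000, sampling_rate=16000, return_tensors="np")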
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    """simple docstring"""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(' '.join(line) + (remaining_spaces + 1) * ' ')
    return answer
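# Worked example (editor's addition): with 16-character lines, leftover spaces are
# distributed round-robin from the left, and the last line is left-justified.
#
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']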
if __name__ == "__main__":
from doctest import testmod
    testmod()
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
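# Worked example: for the pattern "aabaabaaa" the failure (prefix) array is
# [0, 1, 0, 1, 2, 3, 4, 5, 2]; failure[i] is the length of the longest proper
# prefix of pattern[: i + 1] that is also a suffix, which is what lets `kmp`
# fall back without rescanning the text (see Test 5 below).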
if __name__ == "__main__":
# Test 1)
__lowerCamelCase = """abc1abc12"""
__lowerCamelCase = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
__lowerCamelCase = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__lowerCamelCase = """ABABX"""
__lowerCamelCase = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
__lowerCamelCase = """AAAB"""
__lowerCamelCase = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
__lowerCamelCase = """abcdabcy"""
__lowerCamelCase = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
__lowerCamelCase = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2] | 317 | 0 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """simple docstring"""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowercase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__lowercase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowercase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__lowercase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
"""simple docstring"""
    checkpoint_name = '''facebook/mbart-large-en-ro'''
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''')
        cls.pad_token_id = 1
        return cls
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
__lowercase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__lowercase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
__lowercase = 10
__lowercase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
__lowercase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
__lowercase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__lowercase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
__lowercase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
__lowercase = targets['''input_ids''']
__lowercase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 30_34, 2, 25_00_04]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
            } , )
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    '''simple docstring'''
    pass


class OfflineSimulationMode(Enum):
    '''simple docstring'''
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    '''simple docstring'''

    def __init__(self, returncode, stdout, stderr):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,)

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ], timeout=timeout,)
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""")

    return result
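# Usage sketch (illustrative command; stdout/stderr come back as lists of lines):
#
#   result = execute_subprocess_async(["python", "-c", "print('hello')"])
#   assert result.stdout == ["hello"]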
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
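# Example: generate_all_subsequences([1, 2]) prints the four subsequences in this
# order, because the branch that skips the current element recurses first:
#   [], [2], [1], [1, 2]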
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError('The grid does not contain the appropriate information')

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
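# Example (illustrative grid): the cheapest top-left to bottom-right path (moving
# only right or down) through [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is
# 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7.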
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
from collections import defaultdict
def dfs(start: int) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    '''simple docstring'''
    dfs(1)
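# Why `print(len(cuts) - 1)` below: dfs records every vertex whose subtree has an
# even number of nodes; each such subtree can be split off by removing the edge to
# its parent. The root's subtree (the whole tree, 10 nodes here) is also recorded
# but has no parent edge, hence the minus one.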
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
"""simple docstring"""
UpperCAmelCase = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
UpperCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
UpperCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
UpperCAmelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
UpperCAmelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
@slow
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase = model(input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
# test the sequence output on [0, :3, :3]
UpperCAmelCase = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] ,)
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# test the pooled output on [1, :3]
UpperCAmelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
@slow
def _UpperCAmelCase ( self : Union[str, Any] ):
# initialize model with randomly initialized sequence classification head
UpperCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" ,num_labels=2 )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase = model(
input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=tf.convert_to_tensor([1, 1] ) ,)
# test whether we get a loss as a scalar
UpperCAmelCase = outputs.loss
UpperCAmelCase = (2,)
self.assertEqual(loss.shape ,__SCREAMING_SNAKE_CASE )
# test the shape of the logits
UpperCAmelCase = outputs.logits
UpperCAmelCase = (2, 2)
self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : Tuple ):
# initialize model with randomly initialized token classification head
UpperCAmelCase = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" ,num_labels=1_3 )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase = model(
input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
# test the shape of the logits
UpperCAmelCase = outputs.logits
UpperCAmelCase = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : List[Any] ):
        # initialize model with randomly initialized question answering head
UpperCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase = model(input_ids=__SCREAMING_SNAKE_CASE ,bbox=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
# test the shape of the logits
UpperCAmelCase = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape ,__SCREAMING_SNAKE_CASE )
self.assertEqual(outputs.end_logits.shape ,__SCREAMING_SNAKE_CASE )
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
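
def _demo_fix_query_key_value_ordering():
    # Illustrative only: a fused QKV weight for 2 heads of size 4 with 3 splits
    # (q, k, v) is a (3 * 2 * 4) x 5 matrix. For checkpoint_version >= 2.0 the
    # helper reorders rows while preserving the overall shape.
    param = torch.arange(24 * 5, dtype=torch.float32).view(24, 5)
    fixed = fix_query_key_value_ordering(param, 2.0, 3, 2, 4)
    assert fixed.size() == param.size()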
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
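
def _demo_layer_regex():
    # Illustrative only: how the layer regex decomposes one Megatron key into
    # (layer index, operation name, weight-or-bias) groups.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    m = layer_re.match("layers.0.mlp.dense_h_to_4h.weight")
    assert (m.group(1), m.group(2), m.group(3)) == ("0", "mlp.dense_h_to_4h", "weight")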
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)"
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model."
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 12 | 1 |
import torch
from diffusers import DiffusionPipeline
class OneStepUNetPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Start from gaussian noise shaped like a single model sample
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        # One denoising step: predict the noise residual, then step the scheduler
        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample
        # The returned payload is an all-ones tensor with the sample's shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
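
# Illustrative usage sketch (the model construction below is hypothetical and
# not part of the original file):
#
#   from diffusers import DDPMScheduler, UNet2DModel
#   unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
#   pipe = OneStepUNetPipeline(unet=unet, scheduler=DDPMScheduler())
#   ones = pipe()  # tensor of ones shaped like one model sample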
| 424 |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
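
# Illustrative usage sketch (not part of the original module): composing a
# dataset schema from the feature types re-exported above.
#
#   from datasets import ClassLabel, Features, Value
#   schema = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})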
| 424 | 1 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    '''Return the longest non-decreasing subsequence of ``array``.'''
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
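
def _demo_longest_subsequence():
    # Worked examples, traced by hand: for [3, 1, 2] the pivot 3 admits no
    # continuation, so the answer comes from the tail and is [1, 2]; a sorted
    # input is its own longest non-decreasing subsequence.
    assert longest_subsequence([3, 1, 2]) == [1, 2]
    assert longest_subsequence([1, 2, 3]) == [1, 2, 3]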
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 357 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 65 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,):
'''simple docstring'''
super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A )
UpperCAmelCase__ : str = feature_size
UpperCAmelCase__ : int = sampling_rate
UpperCAmelCase__ : int = padding_value
UpperCAmelCase__ : Dict = hop_length
UpperCAmelCase__ : int = win_length
UpperCAmelCase__ : Dict = frame_signal_scale
UpperCAmelCase__ : Dict = preemphasis_coeff
UpperCAmelCase__ : str = mel_floor
UpperCAmelCase__ : Any = normalize_means
UpperCAmelCase__ : str = normalize_vars
UpperCAmelCase__ : int = win_function
UpperCAmelCase__ : List[Any] = return_attention_mask
UpperCAmelCase__ : str = win_length * sampling_rate // 1_000
UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000
UpperCAmelCase__ : int = optimal_fft_length(self.sample_size )
UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1
def __lowercase ( self : Union[str, Any] ,A : np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A )
else:
UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function )
UpperCAmelCase__ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
UpperCAmelCase__ : Optional[Any] = spectrogram(
one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
return msfc_features.T
def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ):
'''simple docstring'''
# make sure we normalize float32 arrays
if self.normalize_means:
UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 )
UpperCAmelCase__ : Any = np.subtract(A ,A )
if self.normalize_vars:
UpperCAmelCase__ : str = x[:input_length].std(axis=0 )
UpperCAmelCase__ : Optional[int] = np.divide(A ,A )
if input_length < x.shape[0]:
UpperCAmelCase__ : int = padding_value
# make sure array is in float32
UpperCAmelCase__ : str = x.astype(np.floataa )
return x
def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ):
'''simple docstring'''
UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )]
def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
UpperCAmelCase__ : Any = is_batched_numpy or (
isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A ,np.ndarray ):
UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa )
elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase__ : Optional[Any] = [raw_speech]
# extract fbank features
UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} )
UpperCAmelCase__ : Optional[Any] = self.pad(
A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,)
# make sure list is in array format
UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,A ):
UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features]
UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
UpperCAmelCase__ : Union[str, Any] = (
np.array(A ,dtype=np.intaa )
if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
UpperCAmelCase__ : Any = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=A )
if return_tensors is not None:
UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A )
return padded_inputs
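
# Self-contained sketch (not part of the original class) of the per-utterance
# mean/variance normalization step performed above, using only the frames that
# carry real signal (here the first 100 of 120):
#
#   import numpy as np
#   feats = np.random.randn(120, 80).astype(np.float32)   # (frames, mel bins)
#   feats = (feats - feats[:100].mean(axis=0)) / feats[:100].std(axis=0)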
| 65 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 183 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
@slow
@require_torch
def a_ ( self ) -> List[Any]:
UpperCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCAmelCase = bertabert.config.encoder.vocab_size
UpperCAmelCase = tokenizer.sep_token_id
UpperCAmelCase = tokenizer.cls_token_id
UpperCAmelCase = 1_2_8
UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCAmelCase = train_dataset.select(range(3_2 ) )
UpperCAmelCase = val_dataset.select(range(1_6 ) )
UpperCAmelCase = 4
def _map_to_encoder_decoder_inputs(lowercase_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCAmelCase = tokenizer(batch['article'] , padding='max_length' , truncation=lowercase_ , max_length=5_1_2 )
UpperCAmelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=lowercase_ , max_length=1_2_8 )
UpperCAmelCase = inputs.input_ids
UpperCAmelCase = inputs.attention_mask
UpperCAmelCase = outputs.input_ids
UpperCAmelCase = outputs.input_ids.copy()
UpperCAmelCase = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
UpperCAmelCase = outputs.attention_mask
assert all(len(lowercase_ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(lowercase_ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowercase_ ):
UpperCAmelCase = pred.label_ids
UpperCAmelCase = pred.predictions
# all unnecessary tokens are removed
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
UpperCAmelCase = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
UpperCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowercase_ ) )] ) / len(lowercase_ )
return {"accuracy": accuracy}
# map train dataset
UpperCAmelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCAmelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = SeqaSeqTrainingArguments(
output_dir=lowercase_ , per_device_train_batch_size=lowercase_ , per_device_eval_batch_size=lowercase_ , predict_with_generate=lowercase_ , evaluation_strategy='steps' , do_train=lowercase_ , do_eval=lowercase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCAmelCase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , compute_metrics=_compute_metrics , train_dataset=lowercase_ , eval_dataset=lowercase_ , tokenizer=lowercase_ , )
# start training
trainer.train()
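
# Self-contained sketch of the label-masking step used in the mapping function
# above: pad positions are replaced by -100 so the cross-entropy loss ignores
# them (values here are hypothetical).
#
#   pad_token_id = 0
#   labels = [[5, 7, 0, 0]]
#   labels = [[-100 if tok == pad_token_id else tok for tok in seq] for seq in labels]
#   assert labels == [[5, 7, -100, -100]]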
| 183 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 78 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 593 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase_ :
@property
def __a ( self ):
return self.get_dummy_input()
@property
def __a ( self ):
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def __a ( self , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ):
_lowercase : Optional[Any] = 4
_lowercase : Optional[int] = 3_2
_lowercase : Optional[Any] = (3_2, 3_2)
_lowercase : int = torch.manual_seed(0 )
_lowercase : Any = torch.device(_lowerCAmelCase )
_lowercase : Any = (batch_size, num_channels) + sizes
_lowercase : Union[str, Any] = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'hidden_states': hidden_states}
if include_temb:
_lowercase : List[Any] = 1_2_8
_lowercase : str = randn_tensor((batch_size, temb_channels) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
if include_res_hidden_states_tuple:
_lowercase : List[str] = torch.manual_seed(1 )
_lowercase : List[str] = (randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase ),)
if include_encoder_hidden_states:
_lowercase : List[Any] = floats_tensor((batch_size, 3_2, 3_2) ).to(_lowerCAmelCase )
if include_skip_sample:
_lowercase : Union[str, Any] = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
return dummy_input
def __a ( self ):
_lowercase : str = {
'in_channels': 3_2,
'out_channels': 3_2,
'temb_channels': 1_2_8,
}
if self.block_type == "up":
_lowercase : Optional[int] = 3_2
if self.block_type == "mid":
init_dict.pop('out_channels' )
_lowercase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __a ( self , _lowerCAmelCase ):
_lowercase , _lowercase : str = self.prepare_init_args_and_inputs_for_common()
_lowercase : Tuple = self.block_class(**_lowerCAmelCase )
unet_block.to(_lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
_lowercase : Dict = unet_block(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = output[0]
self.assertEqual(output.shape , self.output_shape )
_lowercase : Any = output[0, -1, -3:, -3:]
_lowercase : List[str] = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , _lowerCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def __a ( self ):
_lowercase , _lowercase : Optional[int] = self.prepare_init_args_and_inputs_for_common()
_lowercase : List[Any] = self.block_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
_lowercase : Optional[Any] = model(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = output[0]
_lowercase : int = torch.device(_lowerCAmelCase )
_lowercase : Any = randn_tensor(output.shape , device=_lowerCAmelCase )
_lowercase : Dict = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
loss.backward()
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
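
# Worked example of the ratio_char_token statistic computed in tokenize():
# characters per token, so higher values mean the tokenizer compresses better.
#
#   content = "def add(a, b):\n    return a + b\n"   # 32 characters
#   n_tokens = 12                                     # hypothetical token count
#   ratio = len(content) / n_tokens                   # ~2.7 characters per token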
| 677 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
pass
| 96 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # Swap a randomly chosen element into the pivot position before partitioning
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
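
def _demo_partition_count():
    # Worked example: partitioning a 5-element slice scans end - start = 4
    # elements, so one partition call always reports 4 comparisons here,
    # regardless of which pivot is randomly chosen.
    data = [3, 1, 4, 1, 5]
    _, comparisons = _in_place_partition(data, 0, len(data) - 1)
    assert comparisons == 4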
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 96 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
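
# Illustrative aside (hypothetical, stdlib-only; not part of the original
# module): the _LazyModule pattern above defers the heavy torch import until
# an attribute is first accessed.
#
#   import importlib
#
#   class LazyNamespace:
#       def __init__(self, name_to_module):
#           self._name_to_module = name_to_module
#
#       def __getattr__(self, name):
#           module = importlib.import_module(self._name_to_module[name])
#           return getattr(module, name)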
| 706 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=lowerCamelCase_ , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=lowerCamelCase_ , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=lowerCamelCase_ , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=lowerCamelCase_ , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=lowerCamelCase_ , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=lowerCamelCase_ , type=lowerCamelCase_ , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=lowerCamelCase_ , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if args.calibrator == "max":
lowerCAmelCase__ : int = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
lowerCAmelCase__ : List[str] = "histogram"
elif args.calibrator == "mse":
lowerCAmelCase__ : Any = "histogram"
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
lowerCAmelCase__ : Union[str, Any] = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase_ )
lowerCAmelCase__ : Any = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False ):
"""simple docstring"""
logger.info("Configuring Model for Quantization" )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase_ , ["embeddings"] , which="weight" , _disabled=lowerCamelCase_ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase_ , [""] , _disabled=lowerCamelCase_ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase_ , args.quant_disable_keyword , _disabled=lowerCamelCase_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase_ , [R"layer.\d+." + args.quant_disable_layer_module] , _disabled=lowerCamelCase_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase_ , [R"layer.\d+." + args.quant_enable_layer_module] , _disabled=lowerCamelCase_ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase_ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase_ , lowerCamelCase_ )
if args.clip_gelu:
clip_gelu(lowerCamelCase_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
def fusea(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase_ , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
lowerCAmelCase__ : Optional[int] = qq._amax.detach().item()
lowerCAmelCase__ : Optional[int] = qk._amax.detach().item()
lowerCAmelCase__ : Any = qv._amax.detach().item()
lowerCAmelCase__ : List[str] = max(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
qq._amax.fill_(lowerCamelCase_ )
qk._amax.fill_(lowerCamelCase_ )
qv._amax.fill_(lowerCamelCase_ )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
lowerCAmelCase__ : Any = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase_ )
lowerCAmelCase__ : Tuple = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCamelCase_ , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
lowerCAmelCase__ : Optional[int] = mod.weight.shape[0]
lowerCAmelCase__ : List[str] = mod._weight_quantizer._amax.detach()
lowerCAmelCase__ : List[str] = torch.ones(lowerCamelCase_ , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCamelCase_ , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCAmelCase__ : List[Any] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCAmelCase__ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
lowerCAmelCase__ : Tuple = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase_ , keepdims=lowerCamelCase_ ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
lowerCAmelCase__ : str = amax
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=2_5 , lowerCamelCase_=1_8_0 , lowerCamelCase_=None ):
"""simple docstring"""
if ignore is None:
lowerCAmelCase__ : str = []
elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ : List[Any] = [ignore]
lowerCAmelCase__ : Optional[int] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase_ , "weight" ):
continue
lowerCAmelCase__ : str = max(lowerCamelCase_ , len(lowerCamelCase_ ) )
for name, mod in model.named_modules():
lowerCAmelCase__ : Tuple = getattr(lowerCamelCase_ , "_input_quantizer" , lowerCamelCase_ )
lowerCAmelCase__ : Union[str, Any] = getattr(lowerCamelCase_ , "_weight_quantizer" , lowerCamelCase_ )
if not hasattr(lowerCamelCase_ , "weight" ):
continue
if type(lowerCamelCase_ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase_ ) is str and s in name]:
continue
lowerCAmelCase__ : List[Any] = f'''Act:{input_q.extra_repr()}'''
lowerCAmelCase__ : Optional[int] = f'''Wgt:{weight_q.extra_repr()}'''
lowerCAmelCase__ : int = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCamelCase_ ) <= line_width:
logger.info(lowerCamelCase_ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase_ , lowerCamelCase_ )
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="both" , **lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_input_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_weight_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
logger.info(lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCamelCase_ , "_input_quantizer" ) or hasattr(lowerCamelCase_ , "_weight_quantizer" ):
for n in names:
if re.search(lowerCamelCase_ , lowerCamelCase_ ):
set_quantizers(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ : Optional[Any] = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
logger.info(lowerCamelCase_ )
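
# Worked example of the qkv amax fusion performed in the fuse helper above:
# the three quantizers adopt the max of their calibrated ranges so q, k and v
# share one scale factor (values here are hypothetical).
#
#   q_amax, k_amax, v_amax = 1.7, 2.3, 0.9
#   fused = max(q_amax, k_amax, v_amax)  # 2.3, written back into all three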
| 568 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and recover A*B via the inverse transform
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()

        return inverce_c

    # Overwrite __str__ for print(); Shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
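
# Editor's note: a cross-check of the class above against numpy's FFT
# (the same convolution, lowest-degree-first coefficients):
def poly_multiply_numpy(a, b):
    n = len(a) + len(b) - 1
    size = 1 << (n - 1).bit_length()  # next power of two
    product = np.fft.irfft(np.fft.rfft(a, size) * np.fft.rfft(b, size), size)[:n]
    return [round(float(x), 8) for x in product]


# poly_multiply_numpy([1, 2, 3], [3, 4]) == [3.0, 10.0, 17.0, 12.0]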
"""simple docstring"""
def UpperCamelCase ( _A , _A ) -> int:
lowercase : int = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowercase : List[Any] = n - k
# Calculate C(n,k)
for i in range(_A ):
result *= n - i
result //= i + 1
return result
def UpperCamelCase ( _A ) -> int:
return binomial_coefficient(2 * node_count , _A ) // (node_count + 1)
def UpperCamelCase ( _A ) -> int:
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
lowercase : Union[str, Any] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def UpperCamelCase ( _A ) -> int:
return catalan_number(_A ) * factorial(_A )
if __name__ == "__main__":
_lowerCAmelCase = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
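
# Editor's note: a quick cross-check of the closed forms above using the
# standard library (Catalan(n) = C(2n, n)/(n+1) counts BSTs; n! * Catalan(n)
# counts labeled binary trees):
from math import comb

assert [comb(2 * n, n) // (n + 1) for n in range(6)] == [1, 1, 2, 5, 14, 42]
assert catalan_number(3) == 5 and binary_tree_count(3) == 30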
class SubArray:
    def __init__(self, arr):
        # convert the comma-separated string into a list of numbers
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
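
# Editor's note: the same recurrence in its textbook form (Kadane's
# algorithm), as a standalone function for comparison:
def max_subarray_sum(nums):
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)  # extend the current run or start over
        best = max(best, current)
    return best


assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6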
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_UpperCamelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
try:
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as flax_state_f:
__lowerCamelCase : int =from_bytes(SCREAMING_SNAKE_CASE , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(SCREAMING_SNAKE_CASE ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__lowerCamelCase : str =flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
if any(SCREAMING_SNAKE_CASE ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__lowerCamelCase : List[str] =jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =''''''
__lowerCamelCase : List[str] =flatten_dict(SCREAMING_SNAKE_CASE , sep='''.''' )
__lowerCamelCase : List[str] =pt_model.state_dict()
# keep track of unexpected & missing keys
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCamelCase : Tuple =flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__lowerCamelCase : Optional[int] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Union[str, Any] =jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__lowerCamelCase : Optional[Any] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Dict =flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__lowerCamelCase : str =flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Union[str, Any] =(
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__lowerCamelCase : Optional[int] ='''.'''.join(SCREAMING_SNAKE_CASE )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__lowerCamelCase : Tuple =np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
__lowerCamelCase : Any =torch.from_numpy(SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
__lowerCamelCase : Optional[Any] =list(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
return pt_model
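
# Editor's note: the two kernel layout conventions the loader above converts
# between, as a standalone sketch (illustrative shapes only):
def _flax_kernels_to_torch_demo():
    import torch

    dense = np.ones((128, 64), dtype=np.float32)      # Flax Dense kernel: (in, out)
    linear_weight = torch.from_numpy(dense.T)         # torch Linear weight: (out, in)
    conv = np.ones((3, 3, 16, 32), dtype=np.float32)  # Flax Conv kernel: (H, W, in, out)
    conv_weight = torch.from_numpy(conv.transpose(3, 2, 0, 1))  # torch Conv2d: (out, in, H, W)
    return linear_weight.shape, conv_weight.shape     # (64, 128), (32, 16, 3, 3)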
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5-style decoder."""

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm which only scales and doesn't shift (RMSNorm);
        # the variance is accumulated in fp32 for stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """
    Implementation of the GELU activation function currently in the Google BERT repo (identical to OpenAI GPT).
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation (https://arxiv.org/abs/1709.07871)."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
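
# Editor's note: a small shape sanity-check for the FiLM layer above
# (illustrative sizes; FiLM predicts a per-channel scale/shift from the
# conditioning embedding and applies x * (1 + scale) + shift):
def _film_shape_check():
    film = T5FiLMLayer(in_features=768 * 4, out_features=768)
    x = torch.randn(2, 100, 768)       # (batch, seq, d_model)
    cond = torch.randn(2, 1, 768 * 4)  # conditioning embedding
    assert film(x, cond).shape == (2, 100, 768)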
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
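
# Editor's note: a minimal sketch of the lazy-module pattern used above,
# simplified from transformers' `_LazyModule` (class name illustrative):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        # the real import happens only on first attribute access
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)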
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
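
# Editor's note: a single-process sketch of the zero-padding behavior the
# checks above verify (illustrative helper, not part of accelerate):
def pad_to_length(t, length, pad_first=False):
    pad = length - t.shape[0]
    if pad <= 0:
        return t
    zeros = torch.zeros((pad, *t.shape[1:]), dtype=t.dtype, device=t.device)
    return torch.cat([zeros, t] if pad_first else [t, zeros], dim=0)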
"""Project Euler Problem 11: greatest product of four adjacent numbers in a 20x20 grid."""
import os


def solution():
    """Return the greatest product of four adjacent numbers in any direction."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
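
# Editor's note: an equivalent direction-vector scan, shown for comparison
# (same answer, one loop nest instead of four):
def solution_compact():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = [[int(x) for x in f.readline().split()] for _ in range(20)]
    best = 0
    for i in range(20):
        for j in range(20):
            for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
                if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
                    p = grid[i][j] * grid[i + di][j + dj] * grid[i + 2 * di][j + 2 * dj] * grid[i + 3 * di][j + 3 * dj]
                    best = max(best, p)
    return best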
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
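
# Editor's note: a minimal usage sketch from outside the library (assumes the
# `transformers` package, where these classes ship, is installed):
def _data2vec_config_demo():
    from transformers import Data2VecTextConfig, Data2VecTextModel

    config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    model = Data2VecTextModel(config)  # randomly initialized from the config
    return model.config.hidden_size   # 256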
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class snake_case_ :
"""simple docstring"""
def __init__( self , __a ):
"""simple docstring"""
if isinstance(__a , __a ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
A__ = deepcopy(__a )
elif os.path.exists(__a ):
with io.open(__a , 'r' , encoding='utf-8' ) as f:
A__ = json.load(__a )
else:
try:
A__ = baseaa.urlsafe_baadecode(__a ).decode('utf-8' )
A__ = json.loads(__a )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
A__ = config
self.set_stage_and_offload()
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.get_value('zero_optimization.stage' , -1 )
# offload
A__ = False
if self.is_zeroa() or self.is_zeroa():
A__ = set(['cpu', 'nvme'] )
A__ = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
A__ = True
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = self.config
# find the config node of interest if it exists
A__ = ds_key_long.split('.' )
A__ = nodes.pop()
for node in nodes:
A__ = config.get(__a )
if config is None:
return None, ds_key
return config, ds_key
def _UpperCAmelCase ( self , __a , __a=None ):
"""simple docstring"""
A__ , A__ = self.find_config_node(__a )
if config is None:
return default
return config.get(__a , __a )
def _UpperCAmelCase ( self , __a , __a=False ):
"""simple docstring"""
A__ = self.config
# find the config node of interest if it exists
A__ = ds_key_long.split('.' )
for node in nodes:
A__ = config
A__ = config.get(__a )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__a )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = self.get_value(__a )
return False if value is None else bool(__a )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = self.get_value(__a )
return False if value is None else not bool(__a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self._stage == 2
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self._stage == 3
def _UpperCAmelCase ( self ):
"""simple docstring"""
return self._offload
class snake_case_ :
"""simple docstring"""
def __init__( self , __a ):
"""simple docstring"""
A__ = engine
def _UpperCAmelCase ( self , __a , **__a ):
"""simple docstring"""
self.engine.backward(__a , **__a )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self , __a ):
"""simple docstring"""
super().__init__(__a , device_placement=__a , scaler=__a )
A__ = hasattr(self.optimizer , 'overflow' )
def _UpperCAmelCase ( self , __a=None ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _UpperCAmelCase ( self ):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self , __a , __a ):
"""simple docstring"""
super().__init__(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class snake_case_ :
"""simple docstring"""
def __init__( self , __a , __a=0.001 , __a=0 , **__a ):
"""simple docstring"""
A__ = params
A__ = lr
A__ = weight_decay
A__ = kwargs
class snake_case_ :
"""simple docstring"""
def __init__( self , __a , __a=None , __a=0 , **__a ):
"""simple docstring"""
A__ = optimizer
A__ = total_num_steps
A__ = warmup_num_steps
A__ = kwargs
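
# Editor's note: a small usage sketch of the config wrapper above
# (illustrative config values):
def _hf_deepspeed_config_demo():
    cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
    assert cfg.is_zero3() and cfg.is_offload()
    assert cfg.get_value("zero_optimization.offload_param.device") == "cpu"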
"""Simplified DES (S-DES): a toy 8-bit block cipher driven by a 10-bit key."""


def apply_table(inp, table):
    """Apply a (1-based) permutation table to a bit string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (same rounds, subkeys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]  # swap halves
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
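
# Editor's note: tiny self-checks for the primitives above (permutation
# tables are 1-based):
assert apply_table("1010", [2, 4, 3, 1]) == "0011"
assert xor("1010", "0110") == "1100"
assert left_shift("1010") == "0101"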
"""Generate all permutations of a list with Heap's algorithm."""


def heaps(arr):
    """Return all permutations of `arr` as a list of tuples (Heap's algorithm)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
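
# Editor's note: a cross-check against the standard library (small inputs):
from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))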
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
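
# Editor's note: an end-to-end inference sketch with the public checkpoint
# used in the integration test above (downloads weights on first call):
def run_pretrained_audio_diffusion_demo():
    pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    output = pipe(generator=torch.Generator().manual_seed(42))
    return output.audios[0], output.images[0]  # waveform + mel-spectrogram image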
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
    f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs to the device
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
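
# Editor's note: an illustrative helper for aggregating the per-batch times
# returned by `model_infer` (not part of the original script):
def summarize_latency(times_s):
    times_ms = np.array(times_s) * 1000.0
    return {"mean_ms": float(times_ms.mean()), "p99_ms": float(np.percentile(times_ms, 99))}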
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCAmelCase_ ( lowercase_ : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=lowercase_ , stride=args.doc_stride , return_overflowing_tokens=lowercase_ , return_offsets_mapping=lowercase_ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__SCREAMING_SNAKE_CASE : Optional[int] = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__SCREAMING_SNAKE_CASE : Any = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__SCREAMING_SNAKE_CASE : int = tokenized_examples.sequence_ids(lowercase_ )
__SCREAMING_SNAKE_CASE : str = 1 if pad_on_right else 0
            # One example can give several spans; this is the index of the example containing this span of text.
__SCREAMING_SNAKE_CASE : str = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
            # Set the offset_mapping entries that are not part of the context to None, so it is easy to
            # determine whether a token position is part of the context or not.
__SCREAMING_SNAKE_CASE : List[str] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
_lowerCamelCase = raw_datasets['''validation''']
# Validation Feature Creation
_lowerCamelCase = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
_lowerCamelCase = default_data_collator
_lowerCamelCase = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
_lowerCamelCase = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCAmelCase_ ( lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any]="eval" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = postprocess_qa_predictions(
examples=lowercase_ , features=lowercase_ , predictions=lowercase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
__SCREAMING_SNAKE_CASE : int = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
__SCREAMING_SNAKE_CASE : Any = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowercase_ , label_ids=lowercase_ )
_lowerCamelCase = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
        # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCAmelCase_ ( lowercase_ : Any ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(lowercase_ ) ) * engine.get_binding_dtype(lowercase_ ).itemsize
# Allocate device memory for inputs and outputs.
_lowerCamelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
        _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
        _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
_lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes)
_lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_lowerCamelCase = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f' Num examples = {len(eval_dataset)}')
logger.info(f' Batch size = {args.per_device_eval_batch_size}')
_lowerCamelCase = 0.0
_lowerCamelCase = 0
_lowerCamelCase = timeit.default_timer()
_lowerCamelCase = None
for step, batch in enumerate(eval_dataloader):
            _lowerCamelCase , _lowerCamelCase = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
_lowerCamelCase , _lowerCamelCase = outputs
_lowerCamelCase = torch.tensor(start_logits)
_lowerCamelCase = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_lowerCamelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
_lowerCamelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
_lowerCamelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_lowerCamelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
_lowerCamelCase = nested_truncate(all_preds, len(eval_dataset))
_lowerCamelCase = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
logger.info('''Total Number of Inference = %d''', niter)
_lowerCamelCase = post_processing_function(eval_examples, eval_dataset, all_preds)
_lowerCamelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'Evaluation metrics: {eval_metric}')
| 674 | 0 |
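# The evaluation loop above depends on a `model_infer` helper that is not defined in
# this fragment. Below is a minimal sketch of what such a helper could look like,
# assuming pycuda/tensorrt-style buffers, int32 `input_ids`/`attention_mask` inputs,
# two output bindings for start/end logits, and an already-initialized CUDA context
# (e.g. via pycuda.autoinit). Names and binding order are assumptions, not the
# original implementation.
import timeit

import numpy as np
import pycuda.driver as cuda


def model_infer_sketch(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    # Copy the host inputs into the pre-allocated device buffers (async on the stream).
    input_ids = np.asarray(batch["input_ids"], dtype=np.int32).ravel()
    attention_mask = np.asarray(batch["attention_mask"], dtype=np.int32).ravel()
    cuda.memcpy_htod_async(d_inputs[0], input_ids, stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask, stream)
    # Time the engine execution plus the device-to-host copies.
    start_time = timeit.default_timer()
    bindings = [int(d) for d in d_inputs] + [int(d_output0), int(d_output1)]
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Copy the start/end logits back into page-locked host memory and synchronize.
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    stream.synchronize()
    infer_time = timeit.default_timer() - start_time
    return (h_output0, h_output1), infer_time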
'''simple docstring'''
from __future__ import annotations
def a ( UpperCamelCase_ : list[int] , UpperCamelCase_ : list[int] , UpperCamelCase_ : list[int] , UpperCamelCase_ : list[list[str]] , UpperCamelCase_ : int , ) -> None:
snake_case__ =len(UpperCamelCase_ )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all possible results in each row
for col in range(UpperCamelCase_ ):
        # We apply what we learned previously. First we check that the current column is
        # not already in the board (possible_board), because if it is, there is a
        # vertical collision. Then we apply the two formulas we learned before:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise we call the depth-first search function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , UpperCamelCase_ , UpperCamelCase_ , )
def a ( UpperCamelCase_ : int ) -> None:
snake_case__ =[]
depth_first_search([] , [] , [] , UpperCamelCase_ , UpperCamelCase_ )
# Print all the boards
for board in boards:
for column in board:
print(UpperCamelCase_ )
print('' )
print(len(UpperCamelCase_ ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 712 |
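# A quick, hedged sanity check for the solver above: the number of distinct N-queens
# solutions for n = 1..8 is the well-known sequence 1, 0, 0, 2, 10, 4, 40, 92.
# `count_solutions` is a helper written for this sketch; the original script only
# prints the boards.
def count_solutions(n: int) -> int:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    return len(boards)


if __name__ == "__main__":
    assert [count_solutions(n) for n in range(1, 9)] == [1, 0, 0, 2, 10, 4, 40, 92]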
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def a ( UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ) -> Optional[int]:
# Initialise PyTorch model
snake_case__ =TaConfig.from_json_file(UpperCamelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
snake_case__ =TaForConditionalGeneration(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 581 | 0 |
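# Hypothetical invocation of the T5 conversion script above; the file name and all
# paths are placeholders, not values from the original document:
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump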
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 1_0 ) -> str:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or n < 0:
raise ValueError("Invalid input" )
A__ = 1_0**n
A__ = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , SCREAMING_SNAKE_CASE_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 514 |
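# The solution above leans on Python's three-argument pow, which performs modular
# exponentiation by repeated squaring instead of materialising the roughly
# 2.36-million-digit value 2**7830457. A small illustration with toy numbers
# (chosen for this sketch, not from the original):
assert pow(3, 20, 1000) == (3**20) % 1000 == 401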
from datetime import datetime
import requests
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> bytes:
'''simple docstring'''
A__ = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
A__ = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(SCREAMING_SNAKE_CASE_ ).content
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter Video/IGTV url: """).strip()
lowerCAmelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 514 | 1 |
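# A slightly more defensive variant of the download helper above, adding a request
# timeout and HTTP status checks; the downloadgram endpoint and response shape are
# taken from the snippet itself and may change or disappear at any time.
import requests


def download_video_checked(url: str, timeout: float = 30.0) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    meta = requests.get(base_url + url, timeout=timeout)
    meta.raise_for_status()
    video_url = meta.json()[0]["urls"][0]["src"]
    video = requests.get(video_url, timeout=timeout)
    video.raise_for_status()
    return video.content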
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int = 1_0 , UpperCamelCase__ : int = 2_2 ):
"""simple docstring"""
__UpperCAmelCase = range(1 , UpperCamelCase__ )
__UpperCAmelCase = range(1 , UpperCamelCase__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
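# The loop above implements the recurrence C(i) = C(i-1) * (4*i - 2) / (i + 1) for
# the Catalan numbers. A hedged cross-check against the closed form
# C(n) = binomial(2n, n) / (n + 1); this sketch uses zero-based indexing, which may
# differ by one from the convention of the function above.
from math import comb


def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)


assert [catalan_closed_form(n) for n in range(6)] == [1, 1, 2, 5, 14, 42]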
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 76 |
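# The scheduler package above gates each import group behind an availability check
# so that a missing optional backend degrades gracefully instead of crashing at
# import time. A minimal, self-contained sketch of the same pattern for a
# hypothetical backend (`my_backend` and `MyScheduler` are illustrative names, not
# diffusers API):
import importlib.util


class OptionalDependencyNotAvailableSketch(Exception):
    pass


def is_my_backend_available() -> bool:
    return importlib.util.find_spec("my_backend") is not None


try:
    if not is_my_backend_available():
        raise OptionalDependencyNotAvailableSketch()
except OptionalDependencyNotAvailableSketch:
    MyScheduler = None  # real libraries substitute dummy objects that raise a helpful ImportError
else:
    from my_backend import MyScheduler  # resolved only when the backend is installed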
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : Any , lowercase_ : int=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
__SCREAMING_SNAKE_CASE : str = nn.Parameter(lowercase_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
__SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(lowercase_ )
def lowerCAmelCase_ ( lowercase_ : Tuple , lowercase_ : int , lowercase_ : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowercase_ ).view(-1 , lowercase_ ).contiguous().transpose(0 , 1 ) , )
def lowerCAmelCase_ ( lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE : Any = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(weights[2] )
__SCREAMING_SNAKE_CASE : Tuple = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowercase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowercase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowercase_ ).view(-1 , lowercase_ ).contiguous().transpose(0 , 1 ) , )
def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : List[str] , lowercase_ : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = weights[0][0][0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(layer_norm_a[0] )
__SCREAMING_SNAKE_CASE : List[Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) , )
# lsh weights + output
__SCREAMING_SNAKE_CASE : Tuple = weights[0][1]
if len(lowercase_ ) < 4:
set_layer_weights_in_torch_lsh(lowercase_ , torch_block.attention , lowercase_ )
else:
set_layer_weights_in_torch_local(lowercase_ , torch_block.attention , lowercase_ )
    # intermediate weights
__SCREAMING_SNAKE_CASE : Any = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowercase_ ) == 4:
__SCREAMING_SNAKE_CASE : List[str] = intermediate_weights[2]
# layernorm 2
__SCREAMING_SNAKE_CASE : List[str] = np.asarray(intermediate_weights[0][0] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) , )
# intermediate dense
__SCREAMING_SNAKE_CASE : int = np.asarray(intermediate_weights[1][0] )
__SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowercase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowercase_ ) , )
# intermediate out
__SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(intermediate_weights[4][0] )
__SCREAMING_SNAKE_CASE : Any = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowercase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowercase_ ) , )
def lowerCAmelCase_ ( lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = torch_model.reformer
# word embeds
__SCREAMING_SNAKE_CASE : int = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowercase_ ) , )
if isinstance(weights[3] , lowercase_ ):
__SCREAMING_SNAKE_CASE : int = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__SCREAMING_SNAKE_CASE : Dict = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
__SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.tensor(lowercase_ ) )
__SCREAMING_SNAKE_CASE : List[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowercase_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowercase_ , lowercase_ , lowercase_ )
# output layer norm
__SCREAMING_SNAKE_CASE : List[str] = np.asarray(weights[7][0] )
__SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) , )
# output embeddings
__SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(weights[9][0] )
__SCREAMING_SNAKE_CASE : List[Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowercase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowercase_ ) , )
def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : Any , lowercase_ : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = ReformerConfig.from_json_file(lowercase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE : List[str] = ReformerModelWithLMHead(lowercase_ )
with open(lowercase_ , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : int = pickle.load(lowercase_ )['''weights''']
set_model_weights_in_torch(lowercase_ , lowercase_ , config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase_ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 674 | 0 |
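# Hypothetical invocation of the Reformer conversion script above; the file name
# and all paths are placeholders, not values from the original document:
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin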
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "blip_2_vision_model"
def __init__( self : Tuple , __lowerCamelCase : Any=14_08 , __lowerCamelCase : Union[str, Any]=61_44 , __lowerCamelCase : Any=39 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Any=2_24 , __lowerCamelCase : Tuple=14 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : str=0.00001 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : Any=1e-10 , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : Union[str, Any] , ) -> List[Any]:
super().__init__(**__lowerCamelCase )
A : Dict = hidden_size
A : Union[str, Any] = intermediate_size
A : Optional[int] = num_hidden_layers
A : Tuple = num_attention_heads
A : List[str] = patch_size
A : Any = image_size
A : List[Any] = initializer_range
A : Union[str, Any] = attention_dropout
A : int = layer_norm_eps
A : Dict = hidden_act
A : Dict = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
A : int = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
A : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "blip_2_qformer"
def __init__( self : Tuple , __lowerCamelCase : Union[str, Any]=3_05_22 , __lowerCamelCase : Union[str, Any]=7_68 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Optional[int]=30_72 , __lowerCamelCase : int="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=5_12 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : Any=1e-12 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : List[Any]="absolute" , __lowerCamelCase : Any=2 , __lowerCamelCase : str=14_08 , **__lowerCamelCase : int , ) -> Optional[Any]:
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
A : List[Any] = vocab_size
A : Optional[int] = hidden_size
A : Dict = num_hidden_layers
A : int = num_attention_heads
A : Optional[Any] = hidden_act
A : List[str] = intermediate_size
A : Optional[Any] = hidden_dropout_prob
A : Optional[Any] = attention_probs_dropout_prob
A : Tuple = max_position_embeddings
A : Optional[int] = initializer_range
A : Optional[Any] = layer_norm_eps
A : Optional[int] = position_embedding_type
A : List[str] = cross_attention_frequency
A : Union[str, Any] = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[int] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
A : Union[str, Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
A : Optional[int] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = "blip-2"
a__ = True
def __init__( self : Union[str, Any] , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=32 , **__lowerCamelCase : Any ) -> Optional[int]:
super().__init__(**__lowerCamelCase )
if vision_config is None:
A : List[str] = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
A : Dict = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
A : Any = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
A : List[Any] = BlipaVisionConfig(**__lowerCamelCase )
A : List[Any] = BlipaQFormerConfig(**__lowerCamelCase )
A : Union[str, Any] = text_config["model_type"] if "model_type" in text_config else "opt"
A : Tuple = CONFIG_MAPPING[text_model_type](**__lowerCamelCase )
A : Tuple = self.text_config.tie_word_embeddings
A : Union[str, Any] = self.text_config.is_encoder_decoder
A : Union[str, Any] = num_query_tokens
A : str = self.vision_config.hidden_size
A : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
A : Any = 1.0
A : Optional[int] = 0.02
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , __lowerCamelCase : BlipaVisionConfig , __lowerCamelCase : BlipaQFormerConfig , __lowerCamelCase : PretrainedConfig , **__lowerCamelCase : Tuple , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__lowerCamelCase , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
A : str = copy.deepcopy(self.__dict__ )
A : List[str] = self.vision_config.to_dict()
A : int = self.qformer_config.to_dict()
A : Dict = self.text_config.to_dict()
A : Optional[int] = self.__class__.model_type
        return output
| 709 |
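# A hedged usage sketch for a composite configuration like the one above, built
# from its sub-configs. The class names below follow the released transformers API
# (Blip2Config and friends) rather than the mangled names in this dump, so treat
# the exact spelling as an assumption.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig

vision_config = Blip2VisionConfig(hidden_size=1408)
qformer_config = Blip2QFormerConfig(hidden_size=768, encoder_hidden_size=1408)
config = Blip2Config(
    vision_config=vision_config.to_dict(),
    qformer_config=qformer_config.to_dict(),
    num_query_tokens=32,  # number of learned query tokens fed to the Q-Former
)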
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__SCREAMING_SNAKE_CASE = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__SCREAMING_SNAKE_CASE = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Tuple="binary" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Tuple="warn" , ) -> Optional[Any]:
A : str = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score} | 17 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
__UpperCamelCase = 5
__UpperCamelCase = 10
@require_sentencepiece
@require_tokenizers
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Optional[Any] = SpeechaTextTokenizer
lowercase__: Optional[int] = False
lowercase__: Union[str, Any] = True
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
super().setUp()
__snake_case : Tuple = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
__snake_case : int = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
__snake_case : int = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
__snake_case : Union[str, Any] = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
__snake_case : Dict = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
__snake_case : Any = """<pad>"""
__snake_case : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__magic_name__ ) , 10_01 )
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__snake_case : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_89, 50, 14, 1_74, 3_86] , )
__snake_case : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__snake_case : List[Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class _A ( unittest.TestCase ):
lowercase__: List[Any] = '''valhalla/s2t_mustc_multilinguial_medium'''
lowercase__: Union[str, Any] = '''C\'est trop cool'''
lowercase__: Union[str, Any] = '''Esto es genial'''
@classmethod
def lowercase__ ( cls : Optional[Any] ) -> int:
"""simple docstring"""
__snake_case : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
__snake_case : Tuple = [ES_CODE, 4, 16_01, 47, 76_47, 2]
__snake_case : Any = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
__snake_case : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = """fr"""
__snake_case : int = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = """fr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__snake_case : Any = """es"""
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 26 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Dict ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Optional[int] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316 | 0 |
"""simple docstring"""
from __future__ import annotations
def UpperCamelCase ( _A , _A , _A , _A ) -> List[Any]: # noqa: E741
while r - l > 1:
lowercase : List[str] = (l + r) // 2
if v[m] >= key:
lowercase : Dict = m
else:
lowercase : List[Any] = m # noqa: E741
return r
def UpperCamelCase ( _A ) -> int:
if len(_A ) == 0:
return 0
lowercase : Union[str, Any] = [0] * len(_A )
lowercase : Tuple = 1
lowercase : Optional[int] = v[0]
for i in range(1 , len(_A ) ):
if v[i] < tail[0]:
lowercase : int = v[i]
elif v[i] > tail[length - 1]:
lowercase : Dict = v[i]
length += 1
else:
lowercase : List[str] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 |
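# Hedged, self-contained check of the O(n log n) longest-increasing-subsequence
# technique used above (a patience-style tails array plus binary search). The dump
# mangles both function names to `UpperCamelCase`, so this sketch re-implements the
# idea with the standard-library bisect rather than calling the original directly.
import bisect


def lis_length_sketch(v: list[int]) -> int:
    # tails[i] holds the smallest possible tail of an increasing subsequence of length i + 1.
    tails: list[int] = []
    for x in v:
        i = bisect.bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)  # x extends the longest subsequence found so far
        else:
            tails[i] = x  # x is a better (smaller) tail for length i + 1
    return len(tails)


# Classic test vector: [2, 5, 3, 7, 11, 8, 10, 13, 6] has LIS length 6, e.g. 2, 3, 7, 8, 10, 13.
assert lis_length_sketch([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
assert lis_length_sketch([]) == 0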
"""simple docstring"""
from __future__ import annotations
_lowerCAmelCase = [True] * 1_00_00_01
_lowerCAmelCase = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
_lowerCAmelCase = False
i += 1
def UpperCamelCase ( _A ) -> bool:
return seive[n]
def UpperCamelCase ( _A ) -> bool:
return any(digit in """02468""" for digit in str(_A ) )
def UpperCamelCase ( _A = 1_000_000 ) -> list[int]:
lowercase : Dict = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(_A ) and not contains_an_even_digit(_A ):
lowercase : str = str(_A )
lowercase : int = [int(str_num[j:] + str_num[:j] ) for j in range(len(_A ) )]
if all(is_prime(_A ) for i in list_nums ):
result.append(_A )
return result
def UpperCamelCase ( ) -> int:
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 348 | 1 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase_ : Any = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase_ : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase_ : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowerCAmelCase_ : List[Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
    # used during training (even though we don't have a training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def _lowerCamelCase (__lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple:
a__ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
a__ = True
# Deal with multi-line cases
elif (
re.search(
rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , __lowerCamelCase , )
is not None
):
a__ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
a__ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
a__ = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
a__ = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
a__ = True
if not attribute_used:
a__ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
a__ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
a__ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
a__ = True
elif attribute.endswith("_token_id" ):
a__ = True
# configuration class specific cases
if not case_allowed:
a__ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
a__ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def _lowerCamelCase (__lowerCamelCase : int ) -> List[Any]:
a__ = dict(inspect.signature(config_class.__init__ ).parameters )
a__ = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
a__ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
a__ = {}
if len(config_class.attribute_map ) > 0:
a__ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
a__ = inspect.getsourcefile(__lowerCamelCase )
a__ = os.path.dirname(__lowerCamelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
a__ = [os.path.join(__lowerCamelCase , __lowerCamelCase ) for fn in os.listdir(__lowerCamelCase ) if fn.startswith("modeling_" )]
# Get the source code strings
a__ = []
for path in modeling_paths:
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as fp:
modeling_sources.append(fp.read() )
a__ = []
for config_param, default_value in zip(__lowerCamelCase , __lowerCamelCase ):
# `attributes` here is all the variant names for `config_param`
a__ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
unused_attributes.append(attributes[0] )
return sorted(__lowerCamelCase )
def _lowerCamelCase () -> List[Any]:
a__ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
a__ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __lowerCamelCase : inspect.isclass(__lowerCamelCase )
and issubclass(__lowerCamelCase , __lowerCamelCase )
and inspect.getmodule(__lowerCamelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
a__ = check_config_attributes_being_used(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
a__ = unused_attributes
if len(__lowerCamelCase ) > 0:
a__ = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(__lowerCamelCase )
if __name__ == "__main__":
check_config_attributes()
| 489 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
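# Example invocation (added sketch; paths are illustrative -- adjust to your
# setup). The flags mirror the parser defined above, and `--alpha_clm 0.0` is
# required together with `--mlm`/`--alpha_mlm` by `sanity_checks`:
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --dump_path serialization_dir/my_first_distillation --force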
| 489 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 464 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validity check only
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 464 | 1 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A vertex may take `color` only if none of its already-colored neighbours uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
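# Illustrative usage (added example, not part of the original module): a
# triangle graph, given as an adjacency matrix, needs three colors, so
# `color` succeeds with max_colors=3 and returns [] with max_colors=2.
if __name__ == "__main__":
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # []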
| 478 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository (legacy protocol)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
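# Usage sketch (added, hypothetical session; needs network access so the hub
# can resolve the dataset info and file URLs):
#
#   from huggingface_hub import HfApi
#   repo_info = HfApi().dataset_info("squad")
#   fs = HfFileSystem(repo_info=repo_info)
#   fs.ls("")                        # top-level entries built from `siblings`
#   with fs.open("README.md") as f:  # streamed through the resolved hub URL
#       f.read()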
| 475 | 0 |
'''simple docstring'''
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r'| /\ | |- | |- |--| |\ /| |-')
    print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 697 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
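# Usage sketch (added example): the config carries only hyperparameters, so it
# can be built and tweaked without any weights; overrides are plain kwargs.
#
#   config = Data2VecTextConfig(num_hidden_layers=6, hidden_size=384)
#   assert config.model_type == "data2vec-text"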
| 459 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
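# Typical runs (added note; the script path is illustrative, and
# --fix_and_overwrite is the only flag defined by the parser above):
#
#   python utils/check_doc_toc.py                      # check only, raise on a bad ToC
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place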
| 459 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
                [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 713 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    # Möbius function: 0 if `number` has a squared prime factor, otherwise
    # (-1) ** (number of distinct prime factors).
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Function to reshape a row Numpy array into a column Numpy array"""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix inside each class"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Function to compute the covariance matrix between multiple classes"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is lower than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
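# Demonstration sketch (added example, not part of the original module):
# project a tiny 3-feature / 5-sample dataset down to 2 principal components.
# LDA would additionally need labels and `classes > dimensions`.
def demo_pca() -> None:
    features = np.array([[1.0, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    print(principal_component_analysis(features, dimensions=2))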
if __name__ == "__main__":
import doctest
doctest.testmod()
| 205 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
return tuple(SCREAMING_SNAKE_CASE )
| 205 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
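# Example run (added note; script name and paths are illustrative). Converts a
# single timm checkpoint and, with the default --push_to_hub, uploads the model
# and image processor:
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted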
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 454 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_00,
'''save_steps''': 55_00,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 10_00}
@property
    def metric_definitions(self) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"
@property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 314 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str ) ->str | Literal[False]:
_SCREAMING_SNAKE_CASE = list(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = list(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = 0
for i in range(len(__lowerCamelCase ) ):
if lista[i] != lista[i]:
count += 1
_SCREAMING_SNAKE_CASE = """_"""
if count > 1:
return False
else:
return "".join(__lowerCamelCase )
def lowerCamelCase ( __lowerCamelCase : list[str] ) ->list[str]:
_SCREAMING_SNAKE_CASE = []
while True:
_SCREAMING_SNAKE_CASE = ["""$"""] * len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = []
for i in range(len(__lowerCamelCase ) ):
for j in range(i + 1 , len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE = compare_string(binary[i] , binary[j] )
if k is False:
_SCREAMING_SNAKE_CASE = """*"""
_SCREAMING_SNAKE_CASE = """*"""
temp.append("""X""" )
for i in range(len(__lowerCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__lowerCamelCase ) == 0:
return pi
_SCREAMING_SNAKE_CASE = list(set(__lowerCamelCase ) )
def lowerCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Sequence[float] ) ->list[str]:
_SCREAMING_SNAKE_CASE = []
for minterm in minterms:
_SCREAMING_SNAKE_CASE = """"""
for _ in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = str(minterm % 2 ) + string
minterm //= 2
temp.append(__lowerCamelCase )
return temp
def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int ) ->bool:
_SCREAMING_SNAKE_CASE = list(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = list(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = 0
for i in range(len(__lowerCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def selection( chart : list[list[int]] , prime_implicants : list[str] ) ->list[str]:
    temp = []
    select = [0] * len(chart )
    # first pass: mark the essential prime implicants, i.e. the only cover
    # of some minterm column
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    # then greedily take the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants : list[str] , binary : list[str] ) ->list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main( ) ->None:
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        float(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 314 | 1 |
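# For reference, a compact self-contained sketch of the same Quine-McCluskey
# flow on integer minterms. The helper names below (merge, get_prime_implicants)
# are illustrative and are not part of the module above.
def merge(a, b):
    """Merge two implicants that differ in exactly one position, else None."""
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None
    return a[: diff[0]] + "_" + a[diff[0] + 1 :]


def get_prime_implicants(terms):
    primes = set()
    while terms:
        merged = [False] * len(terms)
        next_terms = set()
        for i in range(len(terms)):
            for j in range(i + 1, len(terms)):
                m = merge(terms[i], terms[j])
                if m is not None:
                    merged[i] = merged[j] = True
                    next_terms.add(m)
        # anything that merged with nothing is a prime implicant
        primes.update(t for t, was_merged in zip(terms, merged) if not was_merged)
        terms = list(next_terms)
    return sorted(primes)


# 3-variable example: minterms 0, 1, 2, 5 as fixed-width binary strings.
print(get_prime_implicants([format(m, "03b") for m in (0, 1, 2, 5)]))
# ['00_', '0_0', '_01']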
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__( self , parent , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: int = TFDistilBertModel(config=A__ )
UpperCAmelCase_: Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_: Union[str, Any] = model(A__ )
UpperCAmelCase_: Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase_: int = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: int = TFDistilBertForMaskedLM(config=A__ )
UpperCAmelCase_: Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_: Optional[int] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Dict = TFDistilBertForQuestionAnswering(config=A__ )
UpperCAmelCase_: List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
UpperCAmelCase_: Any = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Any = self.num_labels
UpperCAmelCase_: Union[str, Any] = TFDistilBertForSequenceClassification(A__ )
UpperCAmelCase_: List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_: Optional[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: Tuple = self.num_choices
UpperCAmelCase_: List[str] = TFDistilBertForMultipleChoice(A__ )
UpperCAmelCase_: Union[str, Any] = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_: int = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_: Optional[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
UpperCAmelCase_: str = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_ ( self , A__ , A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
UpperCAmelCase_: str = self.num_labels
UpperCAmelCase_: List[Any] = TFDistilBertForTokenClassification(A__ )
UpperCAmelCase_: Any = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_: Optional[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( snake_case__ , snake_case__ , unittest.TestCase ):
snake_case_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
snake_case_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = TFDistilBertModelTester(self )
UpperCAmelCase_: Tuple = ConfigTester(self , config_class=A__ , dim=37 )
def snake_case_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*A__ )
@slow
def snake_case_ ( self ):
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
UpperCAmelCase_: Union[str, Any] = TFDistilBertModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_: str = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_: str = model(A__ )[0]
UpperCAmelCase_: Union[str, Any] = [1, 6, 768]
self.assertEqual(output.shape , A__ )
UpperCAmelCase_: Tuple = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 )
| 709 |
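# A minimal sketch (TensorFlow assumed available) of the expand-and-tile trick
# the multiple-choice test above relies on: each (batch, seq_len) tensor is
# repeated once per answer choice to get a (batch, num_choices, seq_len) layout.
import tensorflow as tf

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)

multiple_choice_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
print(multiple_choice_ids.shape)  # (2, 4, 5)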
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ ( self ):
"""simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: str = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Union[str, Any] = self.dummy_vae
UpperCAmelCase_: Optional[int] = self.dummy_text_encoder
UpperCAmelCase_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_: Dict = Image.fromarray(np.uint8(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: Optional[int] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Union[str, Any] = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: int = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Optional[Any] = output.images
UpperCAmelCase_: List[str] = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: List[Any] = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=A__ , )[0]
UpperCAmelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_: List[str] = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_: int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Union[str, Any] = DDPMScheduler()
UpperCAmelCase_: Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Any = self.dummy_text_encoder
UpperCAmelCase_: Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_: List[str] = Image.fromarray(np.uint8(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: str = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Union[str, Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Any = output.images
assert image.shape[0] == 2
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: Any = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Dict = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: int = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Dict = self.dummy_text_encoder
UpperCAmelCase_: Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_: List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_: Union[str, Any] = Image.fromarray(np.uint8(A__ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_: List[str] = unet.half()
UpperCAmelCase_: Union[str, Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
UpperCAmelCase_: str = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_: str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCAmelCase_: Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: List[str] = "a cat sitting on a park bench"
UpperCAmelCase_: Any = torch.manual_seed(0 )
UpperCAmelCase_: Any = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCAmelCase_: Optional[int] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Any = StableDiffusionUpscalePipeline.from_pretrained(
            A__ , torch_dtype=torch.float16 , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Any = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case_ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_: List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCAmelCase_: Tuple = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
            A__ , torch_dtype=torch.float16 , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_: str = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Union[str, Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_: Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 306 | 0 |
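# A small sketch of the tensor-to-PIL round trip the tests above use to
# fabricate a low-resolution conditioning image (torch, numpy and Pillow
# assumed installed; sizes are the ones from the tests). The *255 scaling is
# added here so the uint8 cast keeps the image content visible.
import numpy as np
import torch
from PIL import Image

image = torch.rand(1, 3, 32, 32)                    # (batch, channels, H, W) in [0, 1]
array = image.cpu().permute(0, 2, 3, 1)[0].numpy()  # -> (H, W, channels)
low_res_image = Image.fromarray(np.uint8(array * 255)).convert("RGB").resize((64, 64))
print(low_res_image.size)  # (64, 64); the upscale pipeline then multiplies this by 4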
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A : str = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ) -> Tuple:
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
UpperCAmelCase = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
UpperCAmelCase = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
UpperCAmelCase = name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
UpperCAmelCase = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
UpperCAmelCase = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
UpperCAmelCase = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
UpperCAmelCase = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
UpperCAmelCase = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
UpperCAmelCase = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
UpperCAmelCase = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
UpperCAmelCase = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
UpperCAmelCase = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
UpperCAmelCase = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
UpperCAmelCase = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
UpperCAmelCase = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
UpperCAmelCase = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
UpperCAmelCase = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
UpperCAmelCase = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
UpperCAmelCase = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
UpperCAmelCase = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = get_dpt_config(lowerCAmelCase_ )
# load original state_dict from URL
UpperCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(lowerCAmelCase_ )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase = state_dict.pop(lowerCAmelCase_ )
UpperCAmelCase = val
# read in qkv matrices
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# load HuggingFace model
UpperCAmelCase = DPTForSemanticSegmentation(lowerCAmelCase_ ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# Check outputs on an image
UpperCAmelCase = 480 if '''ade''' in checkpoint_url else 384
UpperCAmelCase = DPTImageProcessor(size=lowerCAmelCase_ )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(lowerCAmelCase_ , return_tensors='''pt''' )
# forward pass
UpperCAmelCase = model(**lowerCAmelCase_ ).logits if '''ade''' in checkpoint_url else model(**lowerCAmelCase_ ).predicted_depth
# Assert logits
UpperCAmelCase = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
UpperCAmelCase = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(lowerCAmelCase_ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCAmelCase_ )
)
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=lowerCAmelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you\'re pushing to the hub.",
)
__A : Optional[int] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 130 |
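# A standalone sketch of the fused-QKV split performed in read_in_q_k_v above:
# timm stores query/key/value as one (3*hidden, hidden) matrix, while the HF
# attention module expects three separate (hidden, hidden) matrices.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b, key_b, value_b = in_proj_bias.chunk(3)

assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)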
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result( ) -> None:
    '''simple docstring'''
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list )
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost] )
        adjacency[node_b].append([node_a, cost] )
    result = mst(adjacency )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 593 | 0 |
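# For comparison, a minimal heapq-based Prim's algorithm run on the same
# 9-node graph; it only checks the total MST weight rather than edge identity.
import heapq
from collections import defaultdict

edges = [(0, 1, 4), (0, 7, 8), (1, 2, 8), (7, 8, 7), (7, 6, 1), (2, 8, 2),
         (8, 6, 6), (2, 3, 7), (2, 5, 4), (6, 5, 2), (3, 5, 14), (3, 4, 9),
         (5, 4, 10), (1, 7, 11)]

graph = defaultdict(list)
for u, v, w in edges:
    graph[u].append((w, v))
    graph[v].append((w, u))

visited, heap, total = {0}, list(graph[0]), 0
heapq.heapify(heap)
while heap:
    w, v = heapq.heappop(heap)
    if v in visited:
        continue
    visited.add(v)
    total += w
    for edge in graph[v]:
        heapq.heappush(heap, edge)

print(total)  # 37, matching the sum of the expected MST edges above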
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase_ = os.path.join(git_repo_path, "src", "diffusers")
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
        simple_backend = find_backend(' if not is_torch_available():' )
        self.assertEqual(simple_backend,'torch' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
        self.assertEqual(double_backend,'torch_and_transformers' )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
        self.assertEqual(triple_backend,'torch_and_transformers_and_onnx' )
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch',objects )
        self.assertIn('torch_and_transformers',objects )
        self.assertIn('flax_and_transformers',objects )
        self.assertIn('torch_and_transformers_and_onnx',objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel',objects['torch'] )
        self.assertIn('FlaxUNet2DConditionModel',objects['flax'] )
        self.assertIn('StableDiffusionPipeline',objects['torch_and_transformers'] )
        self.assertIn('FlaxStableDiffusionPipeline',objects['flax_and_transformers'] )
        self.assertIn('LMSDiscreteScheduler',objects['torch_and_scipy'] )
        self.assertIn('OnnxStableDiffusionPipeline',objects['torch_and_transformers_and_onnx'] )
def snake_case__ ( self : int )-> Any:
'''simple docstring'''
A__ = create_dummy_object('CONSTANT','\'torch\'' )
self.assertEqual(lowercase_,'\nCONSTANT = None\n' )
A__ = create_dummy_object('function','\'torch\'' )
self.assertEqual(
lowercase_,'\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
A__ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
A__ = create_dummy_object('FakeClass','\'torch\'' )
self.assertEqual(lowercase_,lowercase_ )
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
A__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
A__ = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'],lowercase_ )
| 586 |
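# find_backend scans an `if not is_xxx_available():` guard and returns the
# backend name(s). A rough, illustrative re-implementation follows — the real
# helper in check_dummies.py may differ in details:
import re

_re_backend = re.compile(r"is_([a-z_]*)_available\(\)")


def find_backend_sketch(line):
    if "if not" not in line:
        return None
    backends = _re_backend.findall(line)
    return "_and_".join(backends) if backends else None


print(find_backend_sketch(" if not is_torch_available():"))  # torch
print(find_backend_sketch(
    " if not (is_torch_available() and is_transformers_available()):"
))  # torch_and_transformers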
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''Creates a nested list of random floats with the given 2-D shape'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple,lowercase_ : str,lowercase_ : Optional[Any]=7,lowercase_ : Union[str, Any]=4_0_0,lowercase_ : Optional[int]=2_0_0_0,lowercase_ : Dict=2_0_4_8,lowercase_ : int=1_2_8,lowercase_ : str=1,lowercase_ : List[Any]=5_1_2,lowercase_ : Union[str, Any]=3_0,lowercase_ : Any=4_4_1_0_0,)-> Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = min_seq_length
A__ = max_seq_length
A__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ = spectrogram_length
A__ = feature_size
A__ = num_audio_channels
A__ = hop_length
A__ = chunk_length
A__ = sampling_rate
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def snake_case__ ( self : Tuple,lowercase_ : List[Any]=False,lowercase_ : Optional[int]=False )-> str:
'''simple docstring'''
def _flatten(lowercase_ : Any ):
return list(itertools.chain(*lowercase_ ) )
if equal_length:
A__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
A__ = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = TvltFeatureExtractor
def snake_case__ ( self : Optional[Any] )-> Dict:
'''simple docstring'''
A__ = TvltFeatureExtractionTester(self )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowercase_,'spectrogram_length' ) )
self.assertTrue(hasattr(lowercase_,'feature_size' ) )
self.assertTrue(hasattr(lowercase_,'num_audio_channels' ) )
self.assertTrue(hasattr(lowercase_,'hop_length' ) )
self.assertTrue(hasattr(lowercase_,'chunk_length' ) )
self.assertTrue(hasattr(lowercase_,'sampling_rate' ) )
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = feat_extract_first.save_pretrained(lowercase_ )[0]
check_json_file_has_correct_format(lowercase_ )
A__ = self.feature_extraction_class.from_pretrained(lowercase_ )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = dict_first.pop('mel_filters' )
A__ = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowercase_,lowercase_ ) )
self.assertEqual(lowercase_,lowercase_ )
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = os.path.join(lowercase_,'feat_extract.json' )
feat_extract_first.to_json_file(lowercase_ )
A__ = self.feature_extraction_class.from_json_file(lowercase_ )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = dict_first.pop('mel_filters' )
A__ = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowercase_,lowercase_ ) )
self.assertEqual(lowercase_,lowercase_ )
def snake_case__ ( self : Optional[int] )-> Optional[int]:
'''simple docstring'''
A__ = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(8_0_0,1_4_0_0,2_0_0 )]
A__ = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test not batched input
A__ = feature_extractor(np_speech_inputs[0],return_tensors='np',sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
A__ = feature_extractor(lowercase_,return_tensors='np',sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
A__ = feature_extractor(
lowercase_,return_tensors='np',sampling_rate=4_4_1_0_0,mask_audio=lowercase_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
A__ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
A__ = np.asarray(lowercase_ )
A__ = feature_extractor(lowercase_,return_tensors='np',sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def snake_case__ ( self : Optional[Any],lowercase_ : Tuple )-> Tuple:
'''simple docstring'''
A__ = load_dataset('hf-internal-testing/librispeech_asr_dummy','clean',split='validation' )
# automatic decoding with librispeech
A__ = ds.sort('id' ).select(range(lowercase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = self._load_datasamples(1 )
A__ = TvltFeatureExtractor()
A__ = feature_extractor(lowercase_,return_tensors='pt' ).audio_values
self.assertEquals(audio_values.shape,(1, 1, 1_9_2, 1_2_8) )
A__ = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2],lowercase_,atol=1E-4 ) )
| 586 | 1 |
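# A quick sketch of how the tester above fabricates "speech" inputs of
# increasing length (pure Python; the values are uniform noise, not audio).
import random

min_len, max_len, batch_size, feature_size = 400, 2000, 7, 128
step = (max_len - min_len) // (batch_size - 1)

speech_inputs = [
    [[random.random() for _ in range(feature_size)] for _ in range(length)]
    for length in range(min_len, max_len, step)
]
print([len(x) for x in speech_inputs])  # [400, 666, 932, 1198, 1464, 1730, 1996]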
def normality( moles : float , volume : float , nfactor : float ) -> float:
    # normality = molarity (moles / volume) scaled by the n-factor
    return round(float(moles / volume ) * nfactor )


def pressure_of_gas_system( moles : float , temperature : float , volume : float ) -> float:
    # ideal gas law, solved for pressure: P = nRT / V with R = 0.0821
    return round(float((moles * 0.0821 * temperature) / (volume) ) )


def volume_of_gas_system( moles : float , temperature : float , pressure : float ) -> float:
    # ideal gas law, solved for volume: V = nRT / P
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )


def temperature_of_gas_system( moles : float , volume : float , pressure : float ) -> float:
    # ideal gas law, solved for temperature: T = PV / (nR)
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 45 |
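# Worked example using the helpers above (identifiers restored from the
# formulas; note the originals round to the nearest whole number): 2 mol of
# an ideal gas in a 10 L vessel at 300 K, with R = 0.0821 L*atm/(mol*K).
print(pressure_of_gas_system(2.0, 300.0, 10.0))     # round(2 * 0.0821 * 300 / 10) -> 5
print(temperature_of_gas_system(2.0, 10.0, 4.926))  # round(4.926 * 10 / (0.0821 * 2)) -> 300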
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset( IterableDataset ):
    # yields consecutive integers and stops at a random point
    def __init__( self , p_stop=0.01 , max_length=10_00 ) -> None:
        '''simple docstring'''
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__( self ):
        '''simple docstring'''
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class lowerCAmelCase__ ( unittest.TestCase ):
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ) -> None:
        '''simple docstring'''
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a )
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_UpperCamelCase = [BatchSamplerShard(a , 2 , a , even_batches=a ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def A_ ( self , a , a , a , a=False , a=2 , a=False ) -> Any:
'''simple docstring'''
random.seed(a )
_UpperCamelCase = list(a )
_UpperCamelCase = [
IterableDatasetShard(
a , batch_size=a , drop_last=a , num_processes=a , process_index=a , split_batches=a , )
for i in range(a )
]
_UpperCamelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a )
iterable_dataset_lists.append(list(a ) )
_UpperCamelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_UpperCamelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a ) , len(a ) )
self.assertTrue(len(a ) % shard_batch_size == 0 )
_UpperCamelCase = []
for idx in range(0 , len(a ) , a ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a ) < len(a ):
reference += reference
self.assertListEqual(a , reference[: len(a )] )
    def test_iterable_dataset_shards(self):
        '''simple docstring'''
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        '''simple docstring'''
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        '''simple docstring'''
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        '''simple docstring'''
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        '''simple docstring'''
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
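
# --- Usage sketch (added for illustration, not part of the original tests) ---
# A minimal example of how `BatchSamplerShard` deals batches out to two
# processes; it assumes the `BatchSampler`/`BatchSamplerShard` imports already
# present at the top of this test file, and is a sketch rather than a test.
if __name__ == "__main__":
    sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(sampler, num_processes=2, process_index=i) for i in range(2)]
    # Batches are distributed round-robin: process 0 receives batches 0 and 2,
    # process 1 receives batches 1 and 3.
    assert list(shards[0]) == [[0, 1], [4, 5]]
    assert list(shards[1]) == [[2, 3], [6, 7]]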
| 612 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    '''simple docstring'''
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    '''simple docstring'''
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
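
# --- Reference sketch (added for illustration) -------------------------------
# A sequential odd-even transposition sort with the same semantics as the
# process-based version above; useful for checking results without spawning
# processes. It is a sketch, not part of the original module.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr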
if __name__ == "__main__":
main()
| 539 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
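
# --- Usage sketch (added for illustration) ------------------------------------
# Instantiating the fast tokenizer defined above from the
# `facebook/blenderbot_small-90M` checkpoint; this assumes network access to
# the Hugging Face Hub and is a sketch, not a test.
if __name__ == "__main__":
    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    print(tokenizer("hello world")["input_ids"])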
| 539 | 1 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        """simple docstring"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
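
# --- Usage sketch (added for illustration) ------------------------------------
# Typical use of the adapter above: log once from the main process, or once per
# process in rank order. Assumes an initialized Accelerator/PartialState.
if __name__ == "__main__":
    from accelerate import Accelerator

    Accelerator()  # initializes the shared PartialState
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process only")
    logger.info("printed by every process, in rank order", main_process_only=False, in_order=True)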
| 486 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 5_2_4_2_8_8,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
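
# --- Usage sketch (added for illustration) ------------------------------------
# Round-tripping text through the tokenizer defined above; assumes the
# `google/reformer-crime-and-punishment` checkpoint is reachable on the Hub.
if __name__ == "__main__":
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer("Crime and Punishment")["input_ids"]
    print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids)))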
| 486 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def get_detr_config(model_name):
    """simple docstring"""
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
logger.info(F"""Converting model {model_name}...""" )
a_ = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=UpperCamelCase ).eval()
a_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCamelCase ):
if is_panoptic:
a_ = """detr.""" + src
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a_ = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
a_ = state_dict.pop(UpperCamelCase )
a_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a_ = state_dict.pop(UpperCamelCase )
a_ = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
a_ = state_dict.pop(UpperCamelCase )
a_ = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
a_ = state_dict.pop(UpperCamelCase )
a_ = val
# finally, create HuggingFace model and load state dict
a_ = DetrForSegmentation(UpperCamelCase ) if is_panoptic else DetrForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# verify our conversion on an image
a_ = """coco_panoptic""" if is_panoptic else """coco_detection"""
a_ = DetrImageProcessor(format=UpperCamelCase )
a_ = processor(images=prepare_img() , return_tensors="""pt""" )
a_ = encoding["""pixel_values"""]
a_ = detr(UpperCamelCase )
a_ = model(UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
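
# --- Example invocation (added for illustration) -------------------------------
# A typical command line for this conversion script, assuming it is saved as
# e.g. `convert_detr_to_pytorch.py` (hypothetical file name); the flags match
# the argparse definitions below:
#
#   python convert_detr_to_pytorch.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path /tmp/detr-resnet-50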
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
_A = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 403 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=4 , ):
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_attention_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_choices
def __magic_name__ ( self ):
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = None
if self.use_attention_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length] )
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ = config_and_inputs
a_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __magic_name__ ( self ):
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ = config_and_inputs
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 403 | 1 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        """simple docstring"""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1], [0.0_0_2, 0.0_2, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        """simple docstring"""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        """simple docstring"""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,)

    def test_time_indices(self):
        """simple docstring"""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        """simple docstring"""
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.1_4_7_7_1)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.3_2_4_6_0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.0_0_9_7_9)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.0_2)) < 1E-5

    def test_batch_step_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1_147.7_904) < 1E-2
        assert abs(result_mean.item() - 0.4_9_8_2) < 1E-3

    def test_full_loop_no_noise(self):
        """simple docstring"""
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_7_2.0_0_6_7) < 1E-2
        assert abs(result_mean.item() - 0.2_2_3_9_6_7) < 1E-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 5_2.5_3_0_2) < 1E-2
        assert abs(result_mean.item() - 0.0_6_8_4) < 1E-3

    def test_full_loop_with_set_alpha_to_one(self):
        """simple docstring"""
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.0_1)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_4_9.8_2_9_5) < 1E-2
        assert abs(result_mean.item() - 0.1_9_5_1) < 1E-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        """simple docstring"""
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.0_1)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 1_4_9.0_7_8_4) < 1E-2
        assert abs(result_mean.item() - 0.1_9_4_1) < 1E-3
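
# --- Usage sketch (added for illustration, not part of the original tests) ----
# Running a few denoising steps with DDIMParallelScheduler outside the test
# harness; the random "model output" stands in for a real UNet prediction.
if __name__ == "__main__":
    scheduler = DDIMParallelScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.randn(1, 3, 8, 8)  # placeholder for a model forward pass
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)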
| 365 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
        'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MaskFormerForInstanceSegmentation',
        'MaskFormerModel',
        'MaskFormerPreTrainedModel',
    ]
    _import_structure['modeling_maskformer_swin'] = [
        'MaskFormerSwinBackbone',
        'MaskFormerSwinModel',
        'MaskFormerSwinPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
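
# --- Behavior note (added for illustration) ------------------------------------
# With the `_LazyModule` registration above, heavy submodules are imported only
# on first attribute access, e.g. (module path assumed from the imports above):
#
#   from transformers.models.maskformer import MaskFormerConfig  # triggers the real import
#
# Nothing torch- or vision-dependent is loaded until a guarded name is touched.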
| 107 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""

    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f' {self.sequence_state_dim} and {self.sequence_head_width}.')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    '''simple docstring'''
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
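
# --- Usage sketch (added for illustration) ------------------------------------
# Building a small ESM config and inspecting the folding defaults; a sketch
# using only the classes defined in this file.
if __name__ == "__main__":
    config = EsmConfig(vocab_size=33, is_folding_model=True)
    print(len(config.vocab_list))  # 33 tokens in the default ESM-2 vocabulary
    print(config.esmfold_config.trunk.max_recycles)  # 4, from TrunkConfig defaults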
| 708 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
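
    # Usage note (added for illustration): strand_sort consumes its input (it
    # pops elements from `arr`), so pass a copy when the original must survive.
    data = [5, 1, 4, 2, 3]
    assert strand_sort(list(data)) == sorted(data)
    assert data == [5, 1, 4, 2, 3]  # the copy protected the original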
| 95 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """simple docstring"""
    return np.dot(vector, vector)
class SVC:
    '''simple docstring'''

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0,) -> None:
        '''simple docstring'''
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        '''simple docstring'''
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        '''simple docstring'''
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        '''simple docstring'''
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        '''simple docstring'''
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
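
    # Usage sketch (added for illustration): fitting the SVC above on a tiny
    # linearly separable problem; labels must be +1/-1 for the dual used here.
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC()
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0.0, 1.5])))  # expected: 1
    print(svc.predict(np.asarray([1.0, 1.5])))  # expected: -1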
| 59 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0) != 1:
raise ValueError("""One and only one argument must be 0""")
if inductance < 0:
raise ValueError("""Inductance cannot be negative""")
if frequency < 0:
raise ValueError("""Frequency cannot be negative""")
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""")
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
import doctest
doctest.testmod()
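
if __name__ == "__main__":
    # Usage sketch (added for illustration): solving for whichever quantity is
    # passed as 0; the values are arbitrary round numbers.
    print(ind_reactance(35e-3, 1_000, 0))  # {'reactance': 219.9114857512855}
    print(ind_reactance(0, 10e3, 50))      # {'inductance': 0.0007957747154594767}
    print(ind_reactance(35e-3, 0, 50))     # {'frequency': 227.36420441699332}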
| 648 | 0 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    '''simple docstring'''
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    '''simple docstring'''
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    '''simple docstring'''
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''', end=''' ''')
            else:
                print('''.''', end=''' ''')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 705 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1E-6,
        attention_dropout=0.0,
        initializer_range=1E-1_0,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''') == "instructblip":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class a_ ( _snake_case ):
UpperCamelCase__ : str ="instructblip_qformer"
def __init__( self :List[Any] , _lowercase :List[Any]=30522 , _lowercase :str=768 , _lowercase :str=12 , _lowercase :int=12 , _lowercase :str=3072 , _lowercase :Optional[Any]="gelu" , _lowercase :Optional[int]=0.1 , _lowercase :int=0.1 , _lowercase :List[Any]=512 , _lowercase :Any=0.02 , _lowercase :Dict=1E-1_2 , _lowercase :int=0 , _lowercase :Any="absolute" , _lowercase :Optional[int]=2 , _lowercase :Optional[int]=1408 , **_lowercase :List[Any] , ) -> str:
super().__init__(pad_token_id=_lowercase , **_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = cross_attention_frequency
UpperCAmelCase_ = encoder_hidden_size
@classmethod
def __a ( cls :Any , _lowercase :Union[str, os.PathLike] , **_lowercase :int) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowercase)
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_lowercase , **_lowercase)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
UpperCAmelCase_ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(_lowercase , **_lowercase)
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] ="instructblip"
UpperCamelCase__ : int =True
def __init__( self :List[Any] , _lowercase :Tuple=None , _lowercase :Tuple=None , _lowercase :Any=None , _lowercase :Any=32 , **_lowercase :Union[str, Any]) -> List[str]:
super().__init__(**_lowercase)
if vision_config is None:
UpperCAmelCase_ = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')
if qformer_config is None:
UpperCAmelCase_ = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')
if text_config is None:
UpperCAmelCase_ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
UpperCAmelCase_ = InstructBlipVisionConfig(**_lowercase)
UpperCAmelCase_ = InstructBlipQFormerConfig(**_lowercase)
UpperCAmelCase_ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
UpperCAmelCase_ = CONFIG_MAPPING[text_model_type](**_lowercase)
UpperCAmelCase_ = self.text_config.tie_word_embeddings
UpperCAmelCase_ = self.text_config.is_encoder_decoder
UpperCAmelCase_ = num_query_tokens
UpperCAmelCase_ = self.vision_config.hidden_size
UpperCAmelCase_ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.02
@classmethod
def __a ( cls :Tuple , _lowercase :InstructBlipVisionConfig , _lowercase :InstructBlipQFormerConfig , _lowercase :PretrainedConfig , **_lowercase :str , ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowercase , )
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = copy.deepcopy(self.__dict__)
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.qformer_config.to_dict()
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
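# Illustrative usage sketch (added, not part of the original module): compose
# an InstructBLIP config from default sub-configs via the classmethod above.
# config = InstructBlipConfig.from_vision_qformer_text_configs(
#     InstructBlipVisionConfig(), InstructBlipQFormerConfig(), CONFIG_MAPPING["opt"]()
# )
# assert config.to_dict()["model_type"] == "instructblip"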
| 561 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
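# Illustrative example (added): a config wrapping a timm backbone that exposes
# only its last feature map; "resnet50" is a hypothetical model choice.
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(-1,))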
| 377 |
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a pytorch BertModel state dict into a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
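# Example invocation (added; the script filename and paths are hypothetical,
# shown as a comment since this is a command-line tool):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt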
| 377 | 1 |
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}

COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decode ciphertext with a repeating key; return None on invalid chars."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep decodings that stay printable."""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the decodings that contain the given common English word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Narrow the candidate decodings down to one and sum its character codes."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
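    # Added demonstration of the XOR property the solver relies on:
    # applying the same repeating key twice recovers the plaintext.
    plain = [ord(ch) for ch in "the secret"]
    key = (ord("g"), ord("o"), ord("d"))
    cipher = [c ^ k for c, k in zip(plain, cycle(key))]
    assert try_key(cipher, key) == "the secret"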
| 707 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested size up to whole latent cells of scale_factor**2 pixels."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1] CHW torch tensor with batch dim."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
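# Worked example for `downscale_height_and_width` above (added note): with the
# default scale_factor=8 the function returns the latent-grid size, so a
# 768x768 request maps to (96, 96), and 700x700 rounds up to (88, 88).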
| 640 | 0 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Perform a recursive depth-first topological sort on the graph in `edges`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
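    # Expected output for the sample graph above (added note): each vertex
    # appears after all of its descendants, i.e. ['c', 'd', 'e', 'b', 'a'].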
    print(sort)
| 296 |
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single story from the Hacker News API by its id."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
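# Example of the markdown produced above (added; illustrative titles and URLs):
# * [Show HN: My side project](https://example.com/a)
# * [A deep dive into B-trees](https://example.com/b)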
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 296 | 1 |
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected unweighted graph for running the Markov chain algorithm."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node in proportion to the outgoing probabilities."""
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the chain for `steps` transitions and count the visits per node."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
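# Minimal usage example (added): a two-state chain where "a" keeps 90% of its
# outgoing probability, so it should be visited roughly five times as often as "b".
# transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# visits = get_transitions("a", transitions, 1000)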
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 720 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
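# Illustrative instantiation (added): the attention window can also be given
# per layer as a list, one entry per hidden layer.
# config = LongformerConfig(attention_window=[256] * 12)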
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
| 298 | 0 |
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` occurs in the sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
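# Added examples for the function above:
#   binary_search([1, 3, 5, 7], 5) -> True
#   binary_search([1, 3, 5, 7], 4) -> False
#   binary_search([], 1)           -> False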
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 26 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance between two points on an ellipsoidal Earth."""
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 420 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into cartesian (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether the net moment of the forces about the origin vanishes."""
    # summation of moments: z-component of the 2D cross product for each force
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])

    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 420 | 1 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 28 |
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
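# Added examples: both the "swish" and "silu" spellings resolve to nn.SiLU.
# assert isinstance(get_activation("swish"), nn.SiLU)
# assert isinstance(get_activation("gelu"), nn.GELU)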
| 150 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs agree (logical XNOR), otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 721 |
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the most frequently occurring value(s) in input_list, sorted."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
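# Added examples for the function above:
#   mode([2, 2, 3, 1])    -> [2]
#   mode([1, 2, 2, 3, 3]) -> [2, 3]
#   mode([])              -> []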
if __name__ == "__main__":
import doctest
doctest.testmod()
| 548 | 0 |
def is_even(number: int) -> bool:
    """Return True if the given integer is even, using its low bit."""
    return number & 1 == 0
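# Added examples: the low bit decides parity.
#   is_even(0) -> True, is_even(7) -> False, is_even(10) -> True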
if __name__ == "__main__":
import doctest
doctest.testmod()
| 452 |
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence by branching on exclude/include at each index."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 452 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 37 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan number sequence from C(0) through C(upper_limit)."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
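# Added check: the sequence starts 1, 1, 2, 5, 14, 42, so
#   catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]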
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| 37 | 1 |
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as if it were a one-file filesystem."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
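# Minimal usage sketch (added; assumes fsspec's chained-URL syntax and a local
# gzip file named data.txt.gz - both are hypothetical):
# with fsspec.open("gzip://data.txt::file://data.txt.gz", mode="rb") as f:
#     raw = f.read()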
| 188 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
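# Minimal usage sketch (added): preprocess one image into a framework batch.
# The 224 shortest-edge size triggers the resize-then-center-crop path above.
# from PIL import Image
# processor = ConvNextImageProcessor(size={"shortest_edge": 224})
# batch = processor.preprocess(Image.new("RGB", (640, 480)), return_tensors="pt")
# batch["pixel_values"].shape -> torch.Size([1, 3, 224, 224])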
| 188 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A_ = logging.get_logger(__name__)
A_ = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'layoutlmv3'
def __init__( self : int , snake_case : str=5_0265 , snake_case : List[Any]=768 , snake_case : Tuple=12 , snake_case : int=12 , snake_case : Any=3072 , snake_case : Optional[Any]="gelu" , snake_case : Dict=0.1 , snake_case : str=0.1 , snake_case : Union[str, Any]=512 , snake_case : Optional[Any]=2 , snake_case : int=0.02 , snake_case : Union[str, Any]=1e-5 , snake_case : Any=1 , snake_case : Tuple=0 , snake_case : List[str]=2 , snake_case : Union[str, Any]=1024 , snake_case : Any=128 , snake_case : List[Any]=128 , snake_case : Union[str, Any]=True , snake_case : str=32 , snake_case : List[str]=128 , snake_case : Tuple=64 , snake_case : Dict=256 , snake_case : str=True , snake_case : int=True , snake_case : Tuple=True , snake_case : str=224 , snake_case : List[Any]=3 , snake_case : Optional[Any]=16 , snake_case : Dict=None , **snake_case : int , ):
'''simple docstring'''
super().__init__(
vocab_size=snake_case , hidden_size=snake_case , num_hidden_layers=snake_case , num_attention_heads=snake_case , intermediate_size=snake_case , hidden_act=snake_case , hidden_dropout_prob=snake_case , attention_probs_dropout_prob=snake_case , max_position_embeddings=snake_case , type_vocab_size=snake_case , initializer_range=snake_case , layer_norm_eps=snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case , )
A__ : List[Any] = max_ad_position_embeddings
A__ : Optional[int] = coordinate_size
A__ : Optional[Any] = shape_size
A__ : Tuple = has_relative_attention_bias
A__ : List[Any] = rel_pos_bins
A__ : Union[str, Any] = max_rel_pos
A__ : int = has_spatial_attention_bias
A__ : Any = rel_ad_pos_bins
A__ : Optional[int] = max_rel_ad_pos
A__ : Dict = text_embed
A__ : List[str] = visual_embed
A__ : Dict = input_size
A__ : List[Any] = num_channels
A__ : List[Any] = patch_size
A__ : Dict = classifier_dropout
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = version.parse('1.12' )
@property
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def _UpperCamelCase ( self : str ):
'''simple docstring'''
return 1e-5
@property
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def _UpperCamelCase ( self : List[str] , snake_case : "ProcessorMixin" , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional["TensorType"] = None , snake_case : int = 3 , snake_case : int = 40 , snake_case : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , """apply_ocr""" , snake_case )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ : Dict = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ : Any = processor.tokenizer.num_special_tokens_to_add(snake_case )
A__ : Dict = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case )
# Generate dummy inputs according to compute batch and sequence
A__ : Any = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
A__ : Optional[int] = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
A__ : Optional[Any] = self._generate_dummy_images(snake_case , snake_case , snake_case , snake_case )
A__ : Any = dict(
processor(
snake_case , text=snake_case , boxes=snake_case , return_tensors=snake_case , ) )
return inputs
| 498 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Union[str, Any]=None ) ->Tuple:
A__ : Dict = None
if token is not None:
A__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
A__ : Dict = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
A__ : Any = requests.get(UpperCAmelCase__, headers=UpperCAmelCase__ ).json()
A__ : Tuple = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
A__ : Optional[Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(UpperCAmelCase__ ):
A__ : str = requests.get(url + f'&page={i + 2}', headers=UpperCAmelCase__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : str=None ) ->List[str]:
A__ : Optional[Any] = None
if token is not None:
A__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
A__ : str = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
A__ : Dict = requests.get(UpperCAmelCase__, headers=UpperCAmelCase__ ).json()
A__ : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
A__ : Union[str, Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(UpperCAmelCase__ ):
A__ : Union[str, Any] = requests.get(url + f'&page={i + 2}', headers=UpperCAmelCase__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any ) ->Tuple:
A__ : Tuple = None
if token is not None:
A__ : List[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
A__ : Tuple = requests.get(UpperCAmelCase__, headers=UpperCAmelCase__, allow_redirects=UpperCAmelCase__ )
A__ : Dict = result.headers["""Location"""]
A__ : Union[str, Any] = requests.get(UpperCAmelCase__, allow_redirects=UpperCAmelCase__ )
A__ : int = os.path.join(UpperCAmelCase__, f'{artifact_name}.zip' )
with open(UpperCAmelCase__, """wb""" ) as fp:
fp.write(response.content )
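# NOTE (illustrative sketch): GitHub's `archive_download_url` answers with a 302
# redirect to a short-lived signed storage URL; the authenticated request is made
# with redirects disabled so the Authorization header is not forwarded to the
# storage host, and the signed URL from the `Location` header is then fetched
# without credentials. A minimal standalone version of the same flow:
def download_zip_sketch(url: str, output_path: str, token: str) -> None:
    headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    redirect = requests.get(url, headers=headers, allow_redirects=False)
    signed_url = redirect.headers["Location"]  # short-lived, pre-authenticated
    response = requests.get(signed_url, allow_redirects=True)
    with open(output_path, "wb") as fp:
        fp.write(response.content)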
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple=None ) ->Tuple:
A__ : int = []
A__ : Union[str, Any] = []
A__ : Optional[Any] = None
with zipfile.ZipFile(UpperCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCAmelCase__ ) as f:
for line in f:
A__ : int = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
A__ : List[str] = line[: line.index(""": """ )]
A__ : str = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
A__ : Any = line[len("""FAILED """ ) :]
failed_tests.append(UpperCAmelCase__ )
elif filename == "job_name.txt":
A__ : Any = line
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
f'`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCAmelCase__ )} for `errors` '
f'and {len(UpperCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
""" problem.""" )
A__ : List[str] = None
if job_name and job_links:
A__ : Any = job_links.get(UpperCAmelCase__, UpperCAmelCase__ )
# A list with elements of the form (line of error, error, failed test)
A__ : str = [x + [y] + [job_link] for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ )]
return result
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int=None ) ->str:
A__ : List[Any] = []
A__ : Dict = [os.path.join(UpperCAmelCase__, UpperCAmelCase__ ) for p in os.listdir(UpperCAmelCase__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCAmelCase__, job_links=UpperCAmelCase__ ) )
return errors
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Dict=None ) ->List[Any]:
A__ : Dict = Counter()
counter.update([x[1] for x in logs] )
A__ : str = counter.most_common()
A__ : Dict = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
A__ : Optional[int] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    A__ : List[str] = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=UpperCAmelCase__ ) )
return r
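# NOTE (illustrative sketch): the reduction above is `Counter.most_common` plus a
# sort of the resulting dict by nested count. Self-contained miniature:
def reduce_by_error_sketch(logs: list) -> dict:
    counter = Counter(x[1] for x in logs)  # x = (error_line, error, failed_test)
    reduced = {
        error: {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
        for error, count in counter.most_common()
    }
    return dict(sorted(reduced.items(), key=lambda item: item[1]["count"], reverse=True))

assert list(reduce_by_error_sketch([("l1", "OSError", "t_a"), ("l2", "OSError", "t_b"), ("l3", "KeyError", "t_c")])) == ["OSError", "KeyError"]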
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->str:
A__ : List[str] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
A__ : Union[str, Any] = test.split("""/""" )[2]
else:
A__ : int = None
return test
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : str=None ) ->Optional[Any]:
A__ : Any = [(x[0], x[1], get_model(x[2] )) for x in logs]
A__ : List[Any] = [x for x in logs if x[2] is not None]
A__ : Union[str, Any] = {x[2] for x in logs}
A__ : int = {}
for test in tests:
A__ : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
A__ : Any = counter.most_common()
A__ : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
A__ : List[str] = sum(error_counts.values() )
if n_errors > 0:
A__ : str = {"""count""": n_errors, """errors""": error_counts}
    A__ : Dict = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=UpperCAmelCase__ ) )
return r
def _lowerCAmelCase ( UpperCAmelCase__ : Dict ) ->List[Any]:
A__ : List[Any] = """| no. | error | status |"""
A__ : Union[str, Any] = """|-:|:-|:-|"""
A__ : Dict = [header, sep]
for error in reduced_by_error:
A__ : List[Any] = reduced_by_error[error]["""count"""]
A__ : List[Any] = f'| {count} | {error[:1_0_0]} | |'
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->int:
A__ : str = """| model | no. of errors | major error | count |"""
A__ : Optional[int] = """|-:|-:|-:|-:|"""
A__ : Tuple = [header, sep]
for model in reduced_by_model:
A__ : Optional[Any] = reduced_by_model[model]["""count"""]
A__ , A__ : Optional[Any] = list(reduced_by_model[model]["""errors"""].items() )[0]
A__ : Optional[int] = f'| {model} | {count} | {error[:6_0]} | {_count} |'
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
A_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ = get_job_links(args.workflow_run_id, token=args.token)
A_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ = k.find(''' / ''')
A_ = k[index + len(''' / ''') :]
A_ = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ = reduce_by_error(errors)
A_ = reduce_by_model(errors)
A_ = make_github_table(reduced_by_error)
A_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 498 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a__ ( snake_case__ ):
lowercase_ = 'vit_msn'
def __init__( self : List[str] , UpperCamelCase_ : Optional[int]=768 , UpperCamelCase_ : str=12 , UpperCamelCase_ : str=12 , UpperCamelCase_ : Optional[Any]=3072 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Tuple=1e-06 , UpperCamelCase_ : Optional[int]=224 , UpperCamelCase_ : Tuple=16 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=True , **UpperCamelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(**A_)
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : Dict = layer_norm_eps
__UpperCAmelCase : str = image_size
__UpperCAmelCase : Union[str, Any] = patch_size
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : Optional[int] = qkv_bias
| 77 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_A = logging.get_logger(__name__)
_A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
_A = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
_A = {
"RUCAIBox/mvp": 1_024,
}
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Dict = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Tuple = ['input_ids', 'attention_mask']
_snake_case : Any = MvpTokenizer
def __init__( self : str , A_ : int=None , A_ : List[Any]=None , A_ : Optional[Any]=None , A_ : int="replace" , A_ : int="<s>" , A_ : Any="</s>" , A_ : List[str]="</s>" , A_ : Optional[int]="<s>" , A_ : Optional[int]="<unk>" , A_ : Optional[int]="<pad>" , A_ : Union[str, Any]="<mask>" , A_ : str=False , A_ : List[str]=True , **A_ : Union[str, Any] , )-> Any:
super().__init__(
A_ , A_ , tokenizer_file=A_ , errors=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , trim_offsets=A_ , **A_ , )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , A_ ) != add_prefix_space:
__UpperCamelCase = getattr(A_ , pre_tok_state.pop("type" ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**A_ )
__UpperCamelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__UpperCamelCase = "post_processor"
__UpperCamelCase = getattr(self.backend_tokenizer , A_ , A_ )
if tokenizer_component_instance:
__UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__UpperCamelCase = tuple(state["sep"] )
if "cls" in state:
__UpperCamelCase = tuple(state["cls"] )
__UpperCamelCase = False
if state.get("add_prefix_space" , A_ ) != add_prefix_space:
__UpperCamelCase = add_prefix_space
__UpperCamelCase = True
if state.get("trim_offsets" , A_ ) != trim_offsets:
__UpperCamelCase = trim_offsets
__UpperCamelCase = True
if changes_to_apply:
__UpperCamelCase = getattr(A_ , state.pop("type" ) )
__UpperCamelCase = component_class(**A_ )
setattr(self.backend_tokenizer , A_ , A_ )
@property
def A ( self : List[str] )-> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : Any , A_ : List[Any] )-> List[Any]:
__UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
__UpperCamelCase = value
def A ( self : str , *A_ : Dict , **A_ : Dict )-> BatchEncoding:
__UpperCamelCase = kwargs.get("is_split_into_words" , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*A_ , **A_ )
def A ( self : Tuple , *A_ : str , **A_ : List[str] )-> BatchEncoding:
__UpperCamelCase = kwargs.get("is_split_into_words" , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*A_ , **A_ )
def A ( self : Optional[int] , A_ : str , A_ : Optional[str] = None )-> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def A ( self : Any , A_ : Dict , A_ : Dict=None )-> Union[str, Any]:
__UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : Optional[int] , A_ : List[int] , A_ : Optional[List[int]] = None )-> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 505 | 0 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( UpperCamelCase_ , unittest.TestCase ):
_lowerCAmelCase =DebertaTokenizer
_lowerCAmelCase =True
_lowerCAmelCase =DebertaTokenizerFast
def UpperCAmelCase__ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
snake_case__ : str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
snake_case__ : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
snake_case__ : Optional[Any] = {'unk_token': '[UNK]'}
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCamelCase ) )
def UpperCAmelCase__ ( self : str , **_lowerCamelCase : Optional[int] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def UpperCAmelCase__ ( self : Dict , _lowerCamelCase : List[str] ):
snake_case__ : int = 'lower newer'
snake_case__ : Optional[int] = 'lower newer'
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.get_tokenizer()
snake_case__ : List[Any] = 'lower newer'
snake_case__ : str = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
snake_case__ : Tuple = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
snake_case__ : int = tokens + [tokenizer.unk_token]
snake_case__ : int = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : str = tokenizer('Hello' , 'World' )
snake_case__ : str = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , _lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : Tuple ):
snake_case__ : Union[str, Any] = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
snake_case__ : int = tokenizer.encode('sequence builders' , add_special_tokens=_lowerCamelCase )
snake_case__ : str = tokenizer.encode('multi-sequence build' , add_special_tokens=_lowerCamelCase )
snake_case__ : List[Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
snake_case__ : Dict = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
snake_case__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
snake_case__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase__ ( self : int ):
snake_case__ : List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case__ : List[Any] = tokenizer_class.from_pretrained('microsoft/deberta-base' )
snake_case__ : int = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
snake_case__ : str = tokenizer(_lowerCamelCase , padding=_lowerCamelCase )
snake_case__ : List[Any] = [tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) for seq in encoding['input_ids']]
# fmt: off
snake_case__ : Optional[int] = {
'input_ids': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case__ : Tuple = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , _lowerCamelCase )
for expected, decoded in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 709 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowercase__( A = 2_0_0_0_0_0_0 ):
snake_case__ : list[int] = [0]
snake_case__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case__ : int = 0
# the area corresponding to the grid that gives the product closest to target
snake_case__ : int = 0
# an estimate of b, using the quadratic formula
snake_case__ : float
# the largest integer less than b_estimate
snake_case__ : int
    # the smallest integer greater than b_estimate
snake_case__ : int
# the triangle number corresponding to b_floor
snake_case__ : int
# the triangle number corresponding to b_ceil
snake_case__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case__ : List[Any] = floor(A )
snake_case__ : List[str] = ceil(A )
snake_case__ : List[str] = triangle_numbers[b_floor]
snake_case__ : Dict = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case__ : str = triangle_b_first_guess * triangle_a
snake_case__ : int = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case__ : Union[str, Any] = triangle_b_second_guess * triangle_a
snake_case__ : List[str] = idx_a * b_ceil
return area
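# NOTE (illustrative sketch): the identity behind the search above -- an a x b
# grid contains C(a+1, 2) * C(b+1, 2) = T(a) * T(b) axis-aligned rectangles,
# where T(n) is the n-th triangle number, because a rectangle is fixed by
# choosing two of the a+1 vertical and two of the b+1 horizontal grid lines.
def rectangles_in_grid_sketch(a: int, b: int) -> int:
    from math import comb

    return comb(a + 1, 2) * comb(b + 1, 2)

assert rectangles_in_grid_sketch(2, 3) == 18  # the 2x3 example grid has 18 rectangles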
if __name__ == "__main__":
print(F"""{solution() = }""")
| 303 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase__ (snake_case__ : BertModel , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
_snake_case : Optional[Any] = ("""dense.weight""", """attention.self.query""", """attention.self.key""", """attention.self.value""")
_snake_case : Optional[Any] = (
("""layer.""", """layer_"""),
("""word_embeddings.weight""", """word_embeddings"""),
("""position_embeddings.weight""", """position_embeddings"""),
("""token_type_embeddings.weight""", """token_type_embeddings"""),
(""".""", """/"""),
("""LayerNorm/weight""", """LayerNorm/gamma"""),
("""LayerNorm/bias""", """LayerNorm/beta"""),
("""weight""", """kernel"""),
)
if not os.path.isdir(snake_case__ ):
os.makedirs(snake_case__ )
_snake_case : Any = model.state_dict()
def to_tf_var_name(snake_case__ : str ):
for patt, repl in iter(snake_case__ ):
_snake_case : Union[str, Any] = name.replace(snake_case__ , snake_case__ )
return F"bert/{name}"
def create_tf_var(snake_case__ : np.ndarray , snake_case__ : str , snake_case__ : tf.Session ):
_snake_case : Any = tf.dtypes.as_dtype(tensor.dtype )
_snake_case : Tuple = tf.get_variable(dtype=snake_case__ , shape=tensor.shape , name=snake_case__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(snake_case__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_snake_case : Dict = to_tf_var_name(snake_case__ )
_snake_case : int = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_snake_case : int = torch_tensor.T
_snake_case : int = create_tf_var(tensor=snake_case__ , name=snake_case__ , session=snake_case__ )
tf.keras.backend.set_value(snake_case__ , snake_case__ )
_snake_case : Dict = session.run(snake_case__ )
print(F"Successfully created {tf_name}: {np.allclose(snake_case__ , snake_case__ )}" )
_snake_case : Optional[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(snake_case__ , os.path.join(snake_case__ , model_name.replace("""-""" , """_""" ) + """.ckpt""" ) )
def UpperCAmelCase__ (snake_case__ : Dict=None ):
"""simple docstring"""
_snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=snake_case__ , required=snake_case__ , help="""model name e.g. bert-base-uncased""" )
parser.add_argument(
"""--cache_dir""" , type=snake_case__ , default=snake_case__ , required=snake_case__ , help="""Directory containing pytorch model""" )
parser.add_argument("""--pytorch_model_path""" , type=snake_case__ , required=snake_case__ , help="""/path/to/<pytorch-model-name>.bin""" )
parser.add_argument("""--tf_cache_dir""" , type=snake_case__ , required=snake_case__ , help="""Directory in which to save tensorflow model""" )
_snake_case : List[str] = parser.parse_args(snake_case__ )
_snake_case : Union[str, Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=snake_case__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
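# NOTE (illustrative sketch): the conversion above reduces to two rules -- rewrite
# parameter names with the (pattern, replacement) pairs, and transpose the dense /
# attention projection weights, because torch.nn.Linear stores (out, in) while a
# TF kernel is (in, out). The pairs below are a simplified subset for illustration.
def convert_param_sketch(name, tensor):
    if any(marker in name for marker in ("dense.weight", "attention.self.query")):
        tensor = tensor.T
    for patt, repl in ((".", "/"), ("weight", "kernel")):
        name = name.replace(patt, repl)
    return f"bert/{name}", tensor

_name, _t = convert_param_sketch("encoder.layer.0.output.dense.weight", np.zeros((768, 3072)))
assert _name == "bert/encoder/layer/0/output/dense/kernel" and _t.shape == (3072, 768)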
| 609 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
A_ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
A_ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
A_ = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ), reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""], )
def UpperCamelCase_ ( self: Any, a_: Union[str, Any], a_: Union[str, Any], a_: str=False ):
'''simple docstring'''
_snake_case : Optional[Any] = spearmanr(a_, a_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 609 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =ArgumentParser('Transformers CLI tool' ,usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCAmelCase_ )
DownloadCommand.register_subcommand(lowerCAmelCase_ )
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
RunCommand.register_subcommand(lowerCAmelCase_ )
ServeCommand.register_subcommand(lowerCAmelCase_ )
UserCommands.register_subcommand(lowerCAmelCase_ )
AddNewModelCommand.register_subcommand(lowerCAmelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ )
LfsCommands.register_subcommand(lowerCAmelCase_ )
PTtoTFCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : str =parser.parse_args()
if not hasattr(lowerCAmelCase_ ,'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : int =args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
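# NOTE (illustrative sketch): the dispatch above is the classic argparse
# sub-command pattern -- each command registers a subparser and stores a factory
# in `func` via `set_defaults`, so `args.func(args)` builds the right service.
class _HelloCommandSketch:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=_HelloCommandSketch)

    def __init__(self, args):
        self._name = args.name

    def run(self):
        print(f"hello {self._name}")

_parser = ArgumentParser("demo")
_HelloCommandSketch.register_subcommand(_parser.add_subparsers())
_args = _parser.parse_args(["hello", "--name", "editor"])
_args.func(_args).run()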
| 153 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ,lowerCAmelCase_ : str ) -> bool:
"""simple docstring"""
    # 1) Construct the failure array
    SCREAMING_SNAKE_CASE_ : Optional[int] =get_failure_array(lowerCAmelCase_ )
    # 2) Step through text searching for pattern
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] =0, 0 # index into text, pattern
while i < len(lowerCAmelCase_ ):
if pattern[j] == text[i]:
if j == (len(lowerCAmelCase_ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] =failure[j - 1]
continue
i += 1
return False
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : str ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =[0]
SCREAMING_SNAKE_CASE_ : List[str] =0
SCREAMING_SNAKE_CASE_ : int =1
while j < len(lowerCAmelCase_ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
SCREAMING_SNAKE_CASE_ : Optional[int] =failure[i - 1]
continue
j += 1
failure.append(lowerCAmelCase_ )
return failure
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE = 'abc1abc12'
__SCREAMING_SNAKE_CASE = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE = 'ABABX'
__SCREAMING_SNAKE_CASE = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE = 'AAAB'
__SCREAMING_SNAKE_CASE = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE = 'abcdabcy'
__SCREAMING_SNAKE_CASE = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
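# NOTE (illustrative sketch): the failure array stores, for each prefix of the
# pattern, the length of its longest proper prefix that is also a suffix; on a
# mismatch the search falls back to that position instead of restarting. A
# variant of `kmp` above that reports every match position instead of a boolean:
def kmp_find_all_sketch(pattern: str, text: str) -> list:
    failure = get_failure_array(pattern)
    positions, i, j = [], 0, 0
    while i < len(text):
        if pattern[j] == text[i]:
            i += 1
            j += 1
            if j == len(pattern):  # full match ends at i; record its start
                positions.append(i - j)
                j = failure[j - 1]
        elif j > 0:
            j = failure[j - 1]  # fall back inside the pattern, keep i
        else:
            i += 1
    return positions

assert kmp_find_all_sketch("abc1", "alskfjaldsabc1abc1abc12k23") == [10, 14, 18]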
| 153 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
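# NOTE (illustrative sketch): the try/except guard above is the standard
# optional-dependency pattern -- probe the extras at import time and, on failure,
# expose placeholder objects that raise a helpful ImportError only when used.
# A minimal stand-in of the kind the dummy module provides:
class _RequiresTorchAndTransformersSketch:
    def __init__(self, *args, **kwargs):
        raise ImportError("This pipeline requires `torch` and `transformers>=4.25.0`.")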
| 408 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[str] = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ : str = {
'google/realm-cc-news-pretrained-embedder': 5_1_2,
'google/realm-cc-news-pretrained-encoder': 5_1_2,
'google/realm-cc-news-pretrained-scorer': 5_1_2,
'google/realm-cc-news-pretrained-openqa': 5_1_2,
'google/realm-orqa-nq-openqa': 5_1_2,
'google/realm-orqa-nq-reader': 5_1_2,
'google/realm-orqa-wq-openqa': 5_1_2,
'google/realm-orqa-wq-reader': 5_1_2,
}
UpperCAmelCase_ : str = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = RealmTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=True , __lowercase="[UNK]" , __lowercase="[SEP]" , __lowercase="[PAD]" , __lowercase="[CLS]" , __lowercase="[MASK]" , __lowercase=True , __lowercase=None , **__lowercase , ):
"""simple docstring"""
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , __lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __lowercase ) != tokenize_chinese_chars
):
__A : Tuple = getattr(__lowercase , normalizer_state.pop('type' ) )
__A : Optional[int] = do_lower_case
__A : List[str] = strip_accents
__A : Dict = tokenize_chinese_chars
__A : List[Any] = normalizer_class(**__lowercase )
__A : int = do_lower_case
def snake_case__ ( self , __lowercase , **__lowercase ):
"""simple docstring"""
__A : Dict = PaddingStrategy.MAX_LENGTH
__A : Optional[int] = text
__A : Union[str, Any] = kwargs.pop('text_pair' , __lowercase )
__A : List[str] = kwargs.pop('return_tensors' , __lowercase )
__A : int = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(__lowercase ):
if batch_text_pair is not None:
__A : Tuple = batch_text_pair[idx]
else:
__A : Union[str, Any] = None
__A : Optional[int] = super().__call__(__lowercase , __lowercase , return_tensors=__lowercase , **__lowercase )
__A : str = encoded_candidates.get('input_ids' )
__A : Union[str, Any] = encoded_candidates.get('attention_mask' )
__A : Optional[int] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(__lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__lowercase )
__A : List[str] = {key: item for key, item in output_data.items() if len(__lowercase ) != 0}
return BatchEncoding(__lowercase , tensor_type=__lowercase )
def snake_case__ ( self , __lowercase , __lowercase=None ):
"""simple docstring"""
__A : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : Union[str, Any] = [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : int = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
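# NOTE (illustrative sketch): the token-type mask built above is purely
# positional -- zeros cover `[CLS] A [SEP]` and ones cover `B [SEP]`:
_cls, _sep, _a, _b = [101], [102], [7, 8], [9]
assert len(_cls + _a + _sep) * [0] + len(_b + _sep) * [1] == [0, 0, 0, 0, 1, 1]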
| 365 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be trained."} )
_a = field(
default="./",metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_a = field(
default="codeparrot/codeparrot-clean-train",metadata={"help": "Name or path of training dataset."} )
_a = field(
default="codeparrot/codeparrot-clean-valid",metadata={"help": "Name or path of validation dataset."} )
_a = field(default=2,metadata={"help": "Batch size for training."} )
_a = field(default=2,metadata={"help": "Batch size for evaluation."} )
_a = field(default=0.1,metadata={"help": "Value of weight decay."} )
_a = field(
default=1_0_0_0_0,metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
_a = field(default=2e-4,metadata={"help": "Learning rate fo training."} )
_a = field(default="cosine",metadata={"help": "Learning rate."} )
_a = field(
default=7_5_0,metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_a = field(
default=1_6,metadata={"help": "Number of gradient accumulation steps."} )
_a = field(
default=UpperCamelCase__,metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_a = field(default=5_0_0_0_0,metadata={"help": "Maximum number of training steps."} )
_a = field(
default=-1,metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a = field(default=1_0_2_4,metadata={"help": "Sequence lengths used for training."} )
_a = field(default=1,metadata={"help": "Training seed."} )
_a = field(
default=1_0_2_4,metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_a = field(default=UpperCamelCase__,metadata={"help": "If True the data is pretokenized."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be evaluated."} )
_a = field(
default="codeparrot/codeparrot-clean-valid",metadata={"help": "Name or path of validation dataset."} )
_a = field(default=2,metadata={"help": "Batch size used for evaluation."} )
_a = field(
default=-1,metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_a = field(default=1_0_2_4,metadata={"help": "Length of sequences to be evaluated."} )
_a = field(default=1,metadata={"help": "Random seed used for evaluation."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Model name or path of model to be evaluated."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Number of workers used for code evaluation."} )
_a = field(
default=UpperCamelCase__,metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Sample from the language model's output distribution."} )
_a = field(default=0.2,metadata={"help": "Sampling temperature used for generation."} )
_a = field(default=2_5_6,metadata={"help": "Maximum number of newly generated tokens."} )
_a = field(default=0,metadata={"help": "Top-k parameter used for generation."} )
_a = field(default=0.95,metadata={"help": "Top-p parameter used for nucleus sampling."} )
_a = field(default=1_0,metadata={"help": "Number of generations to run in parallel."} )
_a = field(
default=2_0_0,metadata={"help": "Number of completions to generate for each sample."} )
_a = field(default=1,metadata={"help": "Random seed used for evaluation."} )
_a = field(
default="eval_results.json",metadata={"help": "Random seed used for evaluation."} )
_a = field(
default="0",metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_a = field(
default=-1,metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
},)
@dataclass
class lowercase :
_a = field(
default=UpperCamelCase__,metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
},)
_a = field(
default="transformersbook/codeparrot",metadata={"help": "Folder or name of dataset to process."} )
_a = field(
default="codeparrot-clean",metadata={"help": "Folder to save processed processed dataset."} )
_a = field(
default=1_0_0_0_0_0,metadata={"help": "Number of files to save per JSON output file."} )
_a = field(default="content",metadata={"help": "Column containing text data to process."} )
_a = field(
default=1_0_0_0,metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_a = field(
default=1_0_0,metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_a = field(
default=0.25,metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_a = field(
default=1.5,metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_a = field(
default=0.7,metadata={"help": "Probability for filtering config, test and uncommon files."} )
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Name or path to the tokenizer."},)
_a = field(
default=UpperCamelCase__,metadata={"help": "If True, near-duplicate samples are removed."} )
_a = field(
default=0.85,metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class lowercase :
_a = field(
default="gpt2",metadata={"help": "Base tokenizer to build new tokenizer from."} )
_a = field(
default="transformersbook/codeparrot-train",metadata={"help": "Dataset to train tokenizer on."} )
_a = field(default="content",metadata={"help": "Column containing text data to process."} )
_a = field(default=2_0_0_0_0_0,metadata={"help": "Number of examples to train tokenizer on."} )
_a = field(
default=3_2_7_6_8,metadata={"help": "Number of examples to train the tokenizer on."} )
_a = field(default="codeparrot",metadata={"help": "Name of new tokenizer."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class lowercase :
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Name or path to the tokenizer."} )
_a = field(
default="codeparrot/codeparrot-clean-train",metadata={"help": "Name or path to the dataset to pretokenize."} )
_a = field(
default="tokenized-codeparrot-train",metadata={"help": "Repo name of the pretokenized data."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class lowercase :
_a = field(
default="gpt2-large",metadata={"help": "Configuration to use for model initialization."} )
_a = field(
default="codeparrot/codeparrot",metadata={"help": "Tokenizer attached to model."} )
_a = field(default="codeparrot",metadata={"help": "Name of the created model."} )
_a = field(default=UpperCamelCase__,metadata={"help": "Push saved tokenizer to the hub."} )
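# NOTE (illustrative sketch): dataclasses like the ones above are normally parsed
# with `transformers.HfArgumentParser`, which turns each field into a CLI flag and
# each `metadata["help"]` into its --help text. A standalone analog with plain
# argparse, to show the mechanics (field names and defaults here are made up):
import dataclasses
from argparse import ArgumentParser

@dataclasses.dataclass
class _TrainCfgSketch:
    lr: float = dataclasses.field(default=2e-4, metadata={"help": "Learning rate for training."})
    seed: int = dataclasses.field(default=1, metadata={"help": "Training seed."})

def _parse_sketch(cls, argv):
    parser = ArgumentParser()
    for f in dataclasses.fields(cls):
        parser.add_argument(f"--{f.name}", type=f.type, default=f.default, help=f.metadata.get("help"))
    return cls(**vars(parser.parse_args(argv)))

_cfg = _parse_sketch(_TrainCfgSketch, ["--lr", "1e-3"])
assert _cfg.lr == 1e-3 and _cfg.seed == 1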
| 54 |
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(snake_case_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
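# NOTE (illustrative sketch): `all_construct` above enumerates every decomposition,
# so its output (and runtime) can grow exponentially; when only the *number* of
# decompositions matters, the same table collapses to integer counts:
def count_construct_sketch(target, word_bank):
    table = [0] * (len(target) + 1)
    table[0] = 1  # one way to build the empty string
    for i in range(len(target)):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    table[i + len(word)] += table[i]
    return table[len(target)]

assert count_construct_sketch("aaa", ["a", "aa"]) == 3  # a+a+a, a+aa, aa+a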
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54 | 1 |
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> int:
if not isinstance(__snake_case , __snake_case ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_UpperCAmelCase = 0
_UpperCAmelCase = str(__snake_case )
while len(__snake_case ) != 1:
_UpperCAmelCase = [int(__snake_case ) for i in num_string]
_UpperCAmelCase = 1
for i in range(0 , len(__snake_case ) ):
total *= numbers[i]
_UpperCAmelCase = str(__snake_case )
steps += 1
return steps
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> int:
if not isinstance(__snake_case , __snake_case ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_UpperCAmelCase = 0
_UpperCAmelCase = str(__snake_case )
while len(__snake_case ) != 1:
_UpperCAmelCase = [int(__snake_case ) for i in num_string]
_UpperCAmelCase = 0
for i in range(0 , len(__snake_case ) ):
total += numbers[i]
_UpperCAmelCase = str(__snake_case )
steps += 1
return steps
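# NOTE (illustrative sketch): both loops above are one reduction over the digits;
# with `math.prod` / `sum` as the step, 39 -> 27 -> 14 -> 4 gives a multiplicative
# persistence of 3, and 199 -> 19 -> 10 -> 1 an additive persistence of 3.
import math

def _persistence_sketch(num, step=math.prod):
    steps = 0
    while num >= 10:
        num = step(int(d) for d in str(num))
        steps += 1
    return steps

assert _persistence_sketch(39) == 3
assert _persistence_sketch(199, step=sum) == 3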
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 108 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__magic_name__ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def _lowerCAmelCase ( UpperCamelCase_ = "mumbai" ):
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
    # Each "organicJob" div holds the details of a single job posting
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
__SCREAMING_SNAKE_CASE = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
__SCREAMING_SNAKE_CASE = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 155 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def A (__lowerCamelCase :Union[str, Any] ):
_lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :Tuple ):
_lowerCAmelCase , _lowerCAmelCase = emb.weight.shape
_lowerCAmelCase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_lowerCAmelCase = emb.weight.data
return lin_layer
def A (__lowerCamelCase :List[str] ):
_lowerCAmelCase = torch.load(__lowerCamelCase , map_location="""cpu""" )
_lowerCAmelCase = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
_lowerCAmelCase = mam_aaa["""model"""]
remove_ignore_keys_(__lowerCamelCase )
_lowerCAmelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
_lowerCAmelCase = MaMaaaConfig(
vocab_size=__lowerCamelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
_lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""]
_lowerCAmelCase = MaMaaaForConditionalGeneration(__lowerCamelCase )
model.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
_lowerCAmelCase = make_linear_from_emb(model.model.shared )
return model
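# NOTE (illustrative sketch): `make_linear_from_emb` above ties the LM head to the
# embedding matrix -- a bias-free Linear sharing the embedding's storage, so the
# output logits are hidden_states @ E^T. Minimal demonstration:
def _tied_head_sketch():
    emb = nn.Embedding(10, 4)
    head = nn.Linear(4, 10, bias=False)
    head.weight.data = emb.weight.data  # shared storage, not a copy
    h = torch.randn(1, 4)
    assert torch.allclose(head(h), h @ emb.weight.T)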
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowercase = parser.parse_args()
    _lowercase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 162 |
'''simple docstring'''
def A (__lowerCamelCase :str , __lowerCamelCase :str ):
assert x is not None
assert y is not None
_lowerCAmelCase = len(__lowerCamelCase )
_lowerCAmelCase = len(__lowerCamelCase )
# declaring the array for storing the dp values
_lowerCAmelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
_lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
_lowerCAmelCase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_lowerCAmelCase = """"""
_lowerCAmelCase , _lowerCAmelCase = m, n
while i > 0 and j > 0:
_lowerCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_lowerCAmelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
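# NOTE (illustrative sketch): the recurrence filled into `l` above is
#     L[i][j] = L[i-1][j-1] + 1            if x[i-1] == y[j-1]
#     L[i][j] = max(L[i-1][j], L[i][j-1])  otherwise
# and the traceback prepends a character exactly on the diagonal "match" moves.
# A top-down memoized version of the same recurrence, to cross-check the table:
def _lcs_len_sketch(x, y):
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def rec(i, j):
        if i == 0 or j == 0:
            return 0
        if x[i - 1] == y[j - 1]:
            return rec(i - 1, j - 1) + 1
        return max(rec(i - 1, j), rec(i, j - 1))

    return rec(len(x), len(y))

assert _lcs_len_sketch("AGGTAB", "GXTXAYB") == 4  # length of "GTAB"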
if __name__ == "__main__":
_lowercase = """AGGTAB"""
_lowercase = """GXTXAYB"""
_lowercase = 4
_lowercase = """GTAB"""
_lowercase , _lowercase = longest_common_subsequence(a, b)
print("""len =""", ln, """, sub-sequence =""", subseq)
import doctest
doctest.testmod()
| 162 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
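# NOTE (illustrative sketch of the LocalSGD idea, not of Accelerate's API):
# each worker takes K local optimizer steps, then all workers average their
# parameters -- simulated below with two tensors standing in for two workers.
def _local_sgd_sketch(num_steps=8, k=4):
    workers = [torch.zeros(1), torch.ones(1)]
    for step in range(1, num_steps + 1):
        for rank, p in enumerate(workers):  # each worker drifts independently
            p += 0.1 * (rank + 1)
        if step % k == 0:  # synchronization point: average parameters
            mean = torch.stack(workers).mean(dim=0)
            for p in workers:
                p.copy_(mean)
    assert torch.allclose(workers[0], workers[1])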
__lowerCamelCase : str = 16
__lowerCamelCase : str = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the GLUE MRPC train/eval dataloaders for the given accelerator."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train and evaluate the model with LocalSGD plus gradient accumulation."""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
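# Illustration (added, not part of the original script): `local_sgd.step()`
# above counts optimizer steps and, every `local_sgd_steps` steps, averages
# the model parameters across workers instead of synchronizing gradients on
# every step. A minimal, framework-free sketch of that averaging on a single
# machine with simulated workers (hypothetical helper, for intuition only):
def local_sgd_average(worker_params: list) -> None:
    """Average the same parameter tensor across simulated workers, in place."""
    with torch.no_grad():
        mean = torch.stack(worker_params).mean(dim=0)
        for p in worker_params:
            p.copy_(mean)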
| 653 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key: str) -> str:
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
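# Quick check (added, not part of the original script): `replace_key` is pure
# string manipulation, so the renaming rules above can be sanity-checked
# without any checkpoint on disk.
assert replace_key("encoders.0.level_blocks.0.k") == "encoders.0.level_blocks.0.codebook"
assert replace_key("y_emb.weight") == "metadata_embedding.weight"
assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"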
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 650 | 0 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        desired_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], desired_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
            # fmt: on
            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
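# Illustration (added, not part of the original test file): the sequence-builder
# test above relies on DeBERTa's special-token layout -- [CLS] A [SEP] for one
# sequence and [CLS] A [SEP] B [SEP] for a pair. A minimal standalone sketch of
# that layout (hypothetical helper and ids, for intuition only):
def build_with_special_tokens(ids_a, ids_b=None, cls_id=1, sep_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]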
| 706 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve: lazily yield primes using a map of upcoming composites."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite; reschedule its factor at the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime; its square is the first composite it produces.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution()) | 535 | 0 |
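    # Quick check (added, not part of the original file): the incremental sieve
    # yields the primes lazily and in order, so a short prefix is easy to verify.
    from itertools import islice

    assert list(islice(sieve(), 8)) == [2, 3, 5, 7, 11, 13, 17, 19]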
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
__magic_name__ : Union[str, Any] ={"facebook/blenderbot_small-90M": 5_12}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the highest-ranked (lowest-index) pair until no known pair remains.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> list:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
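# Illustration (added, not part of the original module): the core of `bpe`
# above repeatedly merges the highest-ranked adjacent pair. A minimal
# standalone version of that loop, with a toy merge table (hypothetical ranks,
# not the Blenderbot vocabulary):
def toy_bpe(word: tuple, ranks: dict) -> tuple:
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

assert toy_bpe(("l", "o", "w"), {("l", "o"): 0, ("lo", "w"): 1}) == ("low",)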
| 664 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
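# Usage sketch (added, not part of the original module): constructing the
# config with its defaults and inspecting the dynamic ONNX axes declared above.
# `OnnxConfig` is assumed to accept the model config as its first argument.
if __name__ == "__main__":
    config = BertConfig()
    print(config.hidden_size)  # 768 by default
    print(BertOnnxConfig(config).inputs)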
| 698 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
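    # Quick check (added, not part of the original file): each iteration
    # replaces every segment with four shorter segments, so after n steps the
    # point list has 3 * 4**n + 1 entries.
    assert len(iterate(INITIAL_VECTORS, 2)) == 3 * 4**2 + 1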
| 705 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
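# Pattern note (added, not part of the original module): each try/except block
# above is a soft-dependency guard -- when an optional backend is missing,
# dummy objects that raise a helpful error are exported instead of the real
# schedulers. A minimal standalone version of the same idea (hypothetical
# flag name, for illustration only):
try:
    import scipy  # noqa: F401

    _scipy_available = True
except ImportError:
    _scipy_available = False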
| 189 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
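# Usage sketch (added, not part of the original test file): outside of a test,
# the winning logit is usually mapped back to a human-readable RVL-CDIP class
# via the model config. A small helper expressing that:
def predicted_label(logits, model):
    """Return the human-readable class name for the highest logit."""
    return model.config.id2label[logits.argmax(-1).item()]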
| 566 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 566 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Defaults to the model max input length for single sentence inputs (taking special tokens into account)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__lowercase = AutoModelWithLMHead.from_config(config )
model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
''' --mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
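# Illustrative values (assumed): with --block_size -1 and a tokenizer maximum of
# 512, block_size falls back to 512; with --block_size 1024 it is clipped to 512.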
# Get datasets
__lowercase = (
get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output['''eval_loss'''] )
__lowercase = {'''perplexity''': perplexity}
__lowercase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(output_eval_file , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , key , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(result )
return results
def _lowerCAmelCase ( lowerCamelCase_ : str ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 703 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
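# Illustrative behavior (assumed, not part of the original script): for a pytest
# summary such as "== 1 failed, 2 passed in 6.24s ==", the parser above returns
# failed=1, success=2 and the raw time token "6.24s".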
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , line ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
'''simple docstring'''
def __init__(self ,title ,doc_test_results ) -> Any:
'''simple docstring'''
__lowercase = title
__lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
__lowercase = doc_test_results['''success''']
__lowercase = doc_test_results['''failures''']
__lowercase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__lowercase = doc_test_results
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self._time_spent]
__lowercase = 0
for time in time_spent:
__lowercase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts ) == 1:
__lowercase = [0, 0, time_parts[0]]
__lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = 40
__lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(v , dict )}
__lowercase = ''''''
for category, failures in category_failures.items():
if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCamelCase )
@staticmethod
def _UpperCAmelCase () -> List[str]:
'''simple docstring'''
__lowercase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
__lowercase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)
def _UpperCAmelCase (self ,job_name ,job_link ,failures ,text ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ''''''
for key, value in failures.items():
__lowercase = value[:200] + ''' [Truncated]''' if len(value ) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
__lowercase = job_name
__lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__lowercase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__lowercase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
__lowercase = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
__lowercase = job_result['''failures''']
__lowercase = self.get_reply_blocks(job ,job_link ,failures ,text=text )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
time.sleep(1 )
def _lowerCAmelCase ( ):
__lowercase = os.environ['''GITHUB_RUN_ID''']
__lowercase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__lowercase = requests.get(lowerCamelCase_ ).json()
__lowercase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__lowercase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCamelCase_ ):
__lowercase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , file ) , encoding='''utf-8''' ) as f:
_artifact[file.split('''.''' )[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , file )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,name ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,path ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(artifact_name )
_available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = get_job_links()
_SCREAMING_SNAKE_CASE = retrieve_available_artifacts()
_SCREAMING_SNAKE_CASE = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_SCREAMING_SNAKE_CASE = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
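# Illustrative shape (assumed): after the loop further down runs, this becomes e.g.
# {"API Examples": {"failed": ["file.py::test"], "failures": {"file.py::test": "error msg"}}, ...}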
# Link to the GitHub Action job
_SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')
_SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
_SCREAMING_SNAKE_CASE = failed
_SCREAMING_SNAKE_CASE = success
_SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''
_SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
_SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_SCREAMING_SNAKE_CASE = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
_SCREAMING_SNAKE_CASE = failure
break
_SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 56 | 0 |
'''simple docstring'''
__lowerCAmelCase = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 358 |
'''simple docstring'''
def __lowerCamelCase ( ) -> Union[str, Any]:
_a : Optional[Any] = []
_a : List[str] = 1
while len(constant ) < 1E6:
constant.append(str(i ) )
i += 1
_a : Optional[Any] = ''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
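# Readable sketch of the same computation (Project Euler 40), with illustrative
# names not taken from the original: concatenate the positive integers into
# Champernowne's constant 0.123456789101112... and multiply the digits found at
# positions 1, 10, 100, ..., 1_000_000.
def champernowne_digit_product() -> int:
    digits = []
    i = 1
    while len(digits) < 10**6:  # each appended chunk has at least one character
        digits.append(str(i))
        i += 1
    constant = "".join(digits)
    product = 1
    for exponent in range(7):
        # positions are 1-indexed, hence the -1 when indexing the string
        product *= int(constant[10**exponent - 1])
    return product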
if __name__ == "__main__":
print(solution())
| 358 | 1 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
__A : int = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
__A : List[str] = logging.WARNING
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = os.getenv("""DATASETS_VERBOSITY""" , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def UpperCAmelCase ( ):
'''simple docstring'''
return __name__.split(""".""" )[0]
def UpperCAmelCase ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def UpperCAmelCase ( ):
'''simple docstring'''
# Apply our default configuration to the library root logger.
snake_case_ : int = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCAmelCase ( lowerCamelCase_ :Optional[str] = None ):
'''simple docstring'''
if lowerCamelCase_ is None:
lowerCamelCase_ = _get_library_name()
return logging.getLogger(lowerCamelCase_ )
def UpperCAmelCase ( ):
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase ( lowerCamelCase_ :int ):
'''simple docstring'''
_get_library_root_logger().setLevel(lowerCamelCase_ )
def UpperCAmelCase ( ):
'''simple docstring'''
return set_verbosity(INFO )
def UpperCAmelCase ( ):
'''simple docstring'''
return set_verbosity(WARNING )
def UpperCAmelCase ( ):
'''simple docstring'''
return set_verbosity(DEBUG )
def UpperCAmelCase ( ):
'''simple docstring'''
return set_verbosity(ERROR )
def UpperCAmelCase ( ):
'''simple docstring'''
_get_library_root_logger().propagate = False
def UpperCAmelCase ( ):
'''simple docstring'''
_get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
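# Illustrative usage sketch (assumed consumer code, not part of this module):
#
#   import datasets
#   datasets.logging.set_verbosity(datasets.logging.INFO)
#   logger = datasets.logging.get_logger(__name__)
#   logger.info("now visible at INFO level")
#
# or equivalently via the environment: DATASETS_VERBOSITY=debug python script.py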
class __UpperCamelCase :
def __init__( self :Optional[Any] ,*_UpperCamelCase :List[Any] ,**_UpperCamelCase :List[Any] ): # pylint: disable=unused-argument
snake_case_ : Optional[int] = args[0] if args else None
def __iter__( self :List[Any] ):
return iter(self._iterator )
def __getattr__( self :int ,_UpperCamelCase :int ):
def empty_fn(*_UpperCamelCase :Optional[int] ,**_UpperCamelCase :int ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self :Tuple ):
return self
def __exit__( self :Optional[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[Any] ):
return
__A : List[Any] = True
class __UpperCamelCase :
def __call__( self :List[Any] ,*_UpperCamelCase :int ,_UpperCamelCase :Any=False ,**_UpperCamelCase :Optional[Any] ):
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_UpperCamelCase ,**_UpperCamelCase )
else:
return EmptyTqdm(*_UpperCamelCase ,**_UpperCamelCase )
def a__ ( self :Any ,*_UpperCamelCase :Optional[int] ,**_UpperCamelCase :Tuple ):
snake_case_ : int = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_UpperCamelCase ,**_UpperCamelCase )
def a__ ( self :Tuple ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__A : str = _tqdm_cls()
def UpperCAmelCase ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase ( ):
'''simple docstring'''
global _tqdm_active
_tqdm_active = True
def UpperCAmelCase ( ):
'''simple docstring'''
global _tqdm_active
_tqdm_active = False
| 267 |
'''simple docstring'''
import torch
from transformers import AutoModel
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self :Union[str, Any] ,_UpperCamelCase :Tuple="sayef/fsner-bert-base-uncased" ):
super(_UpperCamelCase ,self ).__init__()
snake_case_ : Dict = AutoModel.from_pretrained(_UpperCamelCase ,return_dict=True )
snake_case_ : Dict = torch.nn.CosineSimilarity(3 ,1E-0_8 )
snake_case_ : int = torch.nn.Softmax(dim=1 )
def a__ ( self :Optional[Any] ,**_UpperCamelCase :Optional[Any] ):
return self.bert(**_UpperCamelCase ).last_hidden_state
def a__ ( self :List[Any] ,token_embeddings :Dict ):
return token_embeddings.sum(2 ,keepdim=True )
def a__ ( self :Optional[Any] ,query_embeddings :List[str] ,support_embeddings :List[str] ,T :List[str]=1 ):
return self.softmax(T * self.cos(query_embeddings ,support_embeddings ) )
def a__ ( self :Optional[Any] ,W_query :Optional[Any] ,W_supports :int ):
snake_case_ : Union[str, Any] = W_supports["""sizes"""].tolist()
snake_case_ : Dict = W_supports["""start_token_id"""].item()
snake_case_ : int = W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
snake_case_ : int = self.BERT(**W_query )
snake_case_ : int = self.BERT(**W_supports )
snake_case_ : Any = None
snake_case_ : Any = None
snake_case_ : Union[str, Any] = W_supports["""input_ids"""] == start_token_id
snake_case_ : Tuple = W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(support_sizes ):
if i == 0:
snake_case_ : str = 0
else:
snake_case_ : Optional[Any] = support_sizes[i - 1]
snake_case_ : Dict = S[s : s + size][start_token_masks[s : s + size]]
snake_case_ : Optional[Any] = S[s : s + size][end_token_masks[s : s + size]]
snake_case_ : int = torch.matmul(q[i] ,s_start.T ).sum(1 ).softmax(0 )
snake_case_ : List[str] = torch.matmul(q[i] ,s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
snake_case_ : List[str] = torch.vstack((p_starts, p_start) )
snake_case_ : Tuple = torch.vstack((p_ends, p_end) )
else:
snake_case_ : Any = p_start
snake_case_ : List[str] = p_end
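# Illustrative reading (assumed): each row of p_starts / p_ends is a softmax over
# the query tokens, scoring each token as a candidate entity start / end position
# given the support examples.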
return p_starts, p_ends
| 267 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __lowercase ( _UpperCAmelCase=32 , _UpperCAmelCase=10 , _UpperCAmelCase=100 , _UpperCAmelCase=1_026 , _UpperCAmelCase=True , _UpperCAmelCase="data/tokenized_stories_train_wikitext103.jbl" , _UpperCAmelCase="igf_context_pairs.jbl" , ) -> Any:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
__lowercase , __lowercase = generate_datasets(
_UpperCAmelCase , _UpperCAmelCase , number=_UpperCAmelCase , min_len=1_026 , trim=_UpperCAmelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__lowercase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
__lowercase = load_gpta("gpt2" ).to(_UpperCAmelCase )
print("computing perplexity on objective set" )
__lowercase = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).item()
print("perplexity on objective set:" , _UpperCAmelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
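# Pipeline sketch (a reading of the calls above, not authoritative): 1) compute a
# baseline perplexity on the objective set, 2) collect (context, IG(X)) pairs via
# collect_objective_set and dump them to igf_data_file, 3) a SecondaryLearner is
# later trained on those pairs to predict IG(X) for unseen contexts.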
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase=15 , _UpperCAmelCase=128 , _UpperCAmelCase=100 , _UpperCAmelCase="igf_model.pt" , ) -> str:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
__lowercase = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
__lowercase = SecondaryLearner(_UpperCAmelCase )
# Train secondary learner
__lowercase = train_secondary_learner(
_UpperCAmelCase , _UpperCAmelCase , max_epochs=_UpperCAmelCase , batch_size=_UpperCAmelCase , eval_freq=100 , igf_model_path=_UpperCAmelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=32 , _UpperCAmelCase=1_000 , _UpperCAmelCase=16 , _UpperCAmelCase=1.0 , _UpperCAmelCase=recopy_gpta , _UpperCAmelCase=None , _UpperCAmelCase=10 , _UpperCAmelCase="gpt2_finetuned.pt" , ) -> List[str]:
'''simple docstring'''
__lowercase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
__lowercase = RandomSampler(_UpperCAmelCase )
__lowercase = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase )
__lowercase = max_steps // (len(_UpperCAmelCase )) + 1
__lowercase = 0
__lowercase = torch.zeros((1, context_len) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase , __lowercase , __lowercase = recopy_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_UpperCAmelCase )
secondary_learner.eval()
__lowercase = []
__lowercase = 0
__lowercase = []
__lowercase = []
# Compute the performance of the transformer model at the beginning
__lowercase = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
test_perps.append(_UpperCAmelCase )
print("Test perplexity, step" , _UpperCAmelCase , ":" , _UpperCAmelCase )
for epoch in range(int(_UpperCAmelCase ) ):
for step, example in enumerate(_UpperCAmelCase ):
torch.cuda.empty_cache()
__lowercase = random.randint(0 , example.size(2 ) - context_len - 1 )
__lowercase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__lowercase = model(_UpperCAmelCase , labels=_UpperCAmelCase )
__lowercase = True
if secondary_learner is not None:
__lowercase = secondary_learner.forward(
torch.tensor(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_UpperCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__lowercase = -1
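# Assumed reading: once threshold drops to -1, predicted_q < threshold should
# essentially never hold, so every later context passes the filter and is
# backpropagated on.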
if predicted_q < threshold:
__lowercase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__lowercase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__lowercase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__lowercase = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
test_perps.append(_UpperCAmelCase )
print("Test perplexity, step" , _UpperCAmelCase , ":" , _UpperCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _UpperCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __lowercase ( ) -> List[Any]:
'''simple docstring'''
__lowercase = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=_UpperCAmelCase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=_UpperCAmelCase , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1_000 , type=_UpperCAmelCase , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=_UpperCAmelCase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=_UpperCAmelCase , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=_UpperCAmelCase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=_UpperCAmelCase , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1_026 , type=_UpperCAmelCase , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=_UpperCAmelCase , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=_UpperCAmelCase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_UpperCAmelCase , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=_UpperCAmelCase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
__lowercase = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
__lowercase = training_secondary_learner(
_UpperCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
__lowercase = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__lowercase , __lowercase = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1_026 , trim=_UpperCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=_UpperCAmelCase , secondary_learner=_UpperCAmelCase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 321 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class snake_case ( __snake_case ):
"""simple docstring"""
__lowerCAmelCase = """bridgetower_vision_model"""
def __init__( self , lowerCAmelCase_=768 , lowerCAmelCase_=12 , lowerCAmelCase_=3 , lowerCAmelCase_=16 , lowerCAmelCase_=288 , lowerCAmelCase_=1 , lowerCAmelCase_=1E-0_5 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , **lowerCAmelCase_ , ):
super().__init__(**lowerCAmelCase_ )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = stop_gradient
__lowercase = share_layernorm
__lowercase = remove_last_layer
@classmethod
def snake_case__ ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ):
__lowercase , __lowercase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
if config_dict.get("model_type" ) == "bridgetower":
__lowercase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
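# Illustrative flow (assumed): loading a composite "bridgetower" checkpoint with
# this class keeps only the nested vision sub-config before calling from_dict,
# so the vision tower can be instantiated on its own.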
class snake_case ( __snake_case ):
"""simple docstring"""
__lowerCAmelCase = """bridgetower_text_model"""
def __init__( self , lowerCAmelCase_=5_0265 , lowerCAmelCase_=768 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=1 , lowerCAmelCase_=3072 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=514 , lowerCAmelCase_=1 , lowerCAmelCase_=1E-0_5 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_="absolute" , lowerCAmelCase_=True , **lowerCAmelCase_ , ):
super().__init__(**lowerCAmelCase_ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_factor
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = eos_token_id
@classmethod
def snake_case__ ( cls , lowerCAmelCase_ , **lowerCAmelCase_ ):
__lowercase , __lowercase = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
if config_dict.get("model_type" ) == "bridgetower":
__lowercase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class snake_case ( __snake_case ):
"""simple docstring"""
__lowerCAmelCase = """bridgetower"""
def __init__( self , lowerCAmelCase_=True , lowerCAmelCase_="gelu" , lowerCAmelCase_=768 , lowerCAmelCase_=1 , lowerCAmelCase_=1E-0_5 , lowerCAmelCase_=False , lowerCAmelCase_="add" , lowerCAmelCase_=12 , lowerCAmelCase_=6 , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
# TODO: remove this once the Hub files are updated.
__lowercase = kwargs.pop("text_config_dict" , lowerCAmelCase_ )
__lowercase = kwargs.pop("vision_config_dict" , lowerCAmelCase_ )
super().__init__(**lowerCAmelCase_ )
__lowercase = share_cross_modal_transformer_layers
__lowercase = hidden_act
__lowercase = hidden_size
__lowercase = initializer_factor
__lowercase = layer_norm_eps
__lowercase = share_link_tower_layers
__lowercase = link_tower_type
__lowercase = num_attention_heads
__lowercase = num_hidden_layers
__lowercase = tie_word_embeddings
__lowercase = init_layernorm_from_vision_encoder
if text_config is None:
__lowercase = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
__lowercase = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
__lowercase = BridgeTowerTextConfig(**lowerCAmelCase_ )
__lowercase = BridgeTowerVisionConfig(**lowerCAmelCase_ )
@classmethod
def snake_case__ ( cls , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.text_config.to_dict()
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 321 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = BlenderbotSmallTokenizer
_A : List[Any] = False
def A_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
__UpperCAmelCase : List[str] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
__UpperCAmelCase : Dict = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__UpperCAmelCase : str = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
__UpperCAmelCase : List[str] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
def A_ ( self : int , **kwargs : Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def A_ ( self : str , __lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = '''adapt act apte'''
__UpperCAmelCase : Optional[Any] = '''adapt act apte'''
return input_text, output_text
def A_ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase : Dict = '''adapt act apte'''
__UpperCAmelCase : Optional[Any] = ['''adapt''', '''act''', '''ap@@''', '''te''']
__UpperCAmelCase : List[str] = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__UpperCAmelCase : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCAmelCase : List[Any] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_384]
__UpperCAmelCase : List[str] = '''I am a small frog.'''
__UpperCAmelCase : Union[str, Any] = tok([src_text] , padding=__lowercase , truncation=__lowercase )['''input_ids''']
__UpperCAmelCase : Any = tok.batch_decode(__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[str] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
__UpperCAmelCase : Tuple = '''I am a small frog .'''
__UpperCAmelCase : str = '''.'''
__UpperCAmelCase : List[str] = tok(__lowercase )['''input_ids''']
__UpperCAmelCase : Optional[Any] = tok(__lowercase )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 374 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Dict = nn.Linear(3 , 4 )
__UpperCAmelCase : Union[str, Any] = nn.BatchNormad(4 )
__UpperCAmelCase : List[str] = nn.Linear(4 , 5 )
def A_ ( self : Any , __lowercase : Any ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__lowercase ) ) )
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
def A_ ( self : Union[str, Any] , __lowercase : Optional[int] , *__lowercase : str , **__lowercase : Optional[int] ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
def A_ ( self : Any , __lowercase : Tuple , __lowercase : Any ):
'''simple docstring'''
return output + 1
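# Assumed contract mirrored by the helpers above: a pre-forward hook rewrites
# (args, kwargs) before the wrapped forward runs, and a post-forward hook rewrites
# the output afterwards, so with PreForwardHook attached, model(x) matches the
# original forward evaluated on x + 1.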
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
__UpperCAmelCase : Optional[int] = ModelHook()
add_hook_to_module(__lowercase , __lowercase )
self.assertEqual(test_model._hf_hook , __lowercase )
self.assertTrue(hasattr(__lowercase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__lowercase )
self.assertFalse(hasattr(__lowercase , '''_hf_hook''' ) )
self.assertFalse(hasattr(__lowercase , '''_old_forward''' ) )
def A_ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = ModelForTest()
__UpperCAmelCase : Tuple = ModelHook()
add_hook_to_module(__lowercase , __lowercase )
add_hook_to_module(__lowercase , __lowercase , append=__lowercase )
self.assertEqual(isinstance(test_model._hf_hook , __lowercase ) , __lowercase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__lowercase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(__lowercase )
self.assertFalse(hasattr(__lowercase , '''_hf_hook''' ) )
self.assertFalse(hasattr(__lowercase , '''_old_forward''' ) )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
__UpperCAmelCase : Tuple = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = test_model(x + 1 )
__UpperCAmelCase : Optional[Any] = test_model(x + 2 )
__UpperCAmelCase : Optional[int] = PreForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Union[str, Any] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__UpperCAmelCase : int = PreForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Any = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Tuple = test_model(__lowercase )
assert torch.allclose(__lowercase , __lowercase , atol=1e-5 )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : int = ModelForTest()
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 )
__UpperCAmelCase : Tuple = test_model(__lowercase )
__UpperCAmelCase : int = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__UpperCAmelCase : str = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : List[str] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__UpperCAmelCase : Optional[int] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Dict = test_model(__lowercase )
assert torch.allclose(__lowercase , output + 2 , atol=1e-5 )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ModelForTest()
__UpperCAmelCase : Union[str, Any] = torch.randn(2 , 3 )
__UpperCAmelCase : str = test_model(__lowercase )
__UpperCAmelCase : Union[str, Any] = PostForwardHook()
add_hook_to_module(__lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = test_model(__lowercase )
self.assertTrue(torch.allclose(__lowercase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : int = test_model(__lowercase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__UpperCAmelCase : Dict = torch.randn(2 , 3 )
__UpperCAmelCase : Any = model(__lowercase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__lowercase , AlignDevicesHook(io_same_device=__lowercase ) )
__UpperCAmelCase : List[Any] = torch.randn(2 , 3 ).to(0 )
__UpperCAmelCase : int = model(__lowercase )
self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload( self ):
        '''simple docstring'''
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
        # This will move each submodule on different devices
        hook_kwargs = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''meta''' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs['''execution_device'''] )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
        # Now test with buffers included in the offload
        hook_kwargs = {
            '''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
            '''offload''': True,
            '''offload_buffers''': True,
        }
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs ) )
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1 )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.linear2 )
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
    def test_attach_align_device_hook_as_cpu_offload( self ):
        '''simple docstring'''
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else '''cpu'''
        attach_align_device_hook(model , execution_device=execution_device , offload=True )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''meta''' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(model , execution_device=execution_device , offload=True , offload_buffers=True )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map( self ):
        '''simple docstring'''
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else '''cpu'''
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''meta''' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device )
        self.assertEqual(model.batchnorm.running_mean.device , device )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model , execution_device=execution_device , offload=True , weights_map=model.state_dict() , offload_buffers=True , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''meta''' ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
        x = torch.randn(2 , 3 )
        output = model(x )
        self.assertEqual(output.device , device )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model )
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu''' ) )
| 374 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config( PretrainedConfig ):
'''simple docstring'''
    model_type = 'deberta-v2'
    def __init__( self , vocab_size=1_2_8_1_0_0 , hidden_size=1_5_3_6 , num_hidden_layers=2_4 , num_attention_heads=2_4 , intermediate_size=6_1_4_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=0 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('''|''' )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('''pooler_hidden_size''' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
    def default_onnx_opset( self ):
return 1_2
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 4_0 , image_height = 4_0 , tokenizer = None , ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 661 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_00_00_00 ) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
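# The b estimate above solves T_a * T_b ≈ target with T_b = b * (b + 1) / 2,
# i.e. b^2 + b - 2 * target / T_a = 0, whose positive root is
# b = (-1 + sqrt(1 + 8 * target / T_a)) / 2.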
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImgaImgPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 ,size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size ,projection_dim=embedder_projection_dim ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=32 ,intermediate_size=37 ,patch_size=1 ,) )
        # image noising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        # regular denoising components
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 ,eos_token_id=2 ,hidden_size=embedder_hidden_size ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") ,up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type="projection" ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=embedder_hidden_size ,layers_per_block=1 ,upcast_attention=True ,use_linear_projection=True ,)
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" ,beta_start=0.00_085 ,beta_end=0.012 ,prediction_type="v_prediction" ,set_alpha_to_one=False ,steps_offset=1 ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs( self ,device ,seed=0 ,pil_image=True ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        if pil_image:
            # map the tensor from [-1, 1] to [0, 1] before converting it to a PIL image
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 ,1 )
            input_image = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({"image_embeds": None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" ,torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image ,"anime turle" ,generator=generator ,output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image ,expected_image )
    def test_stable_unclip_h_img2img( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" ,torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe(init_image ,"anime turle" ,generator=generator ,output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image ,expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" ,torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image ,"anime turtle" ,num_inference_steps=2 ,output_type="np" ,)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 709 |
'''simple docstring'''
import random
def random_graph(vertices_number: int , probability: float , directed: bool = False ) -> dict:
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is less than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i )
    return graph
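# Example (output varies with the random seed): random_graph(4, 0.5) could return
# {0: [1], 1: [0, 2, 3], 2: [1], 3: [1]} -- adjacency lists of an undirected graph.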
def complete_graph(vertices_number: int ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 465 | 0 |
def catalan_numbers(upper_limit: int ) -> "list[int]":
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 ,upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
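# Example: catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].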
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'The Catalan numbers from 0 through {N} are:')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 344 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key: str ,file: str ) -> str:
    '''simple docstring'''
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R'''.*layer_(\d*).*''' ,file )[1] )
    layer_number -= 3
    return F'h.{layer_number}.' + key
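# Example (hypothetical file name following the Megatron-DeepSpeed layout assumed above):
# layer_name_mapping("self_attention.dense.weight", "layer_05-model_00-model_states.pt")
# returns "h.2.self_attention.dense.weight" (layer 5 minus the 3 leading non-transformer layers).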
def get_dtype_size(dtype ) -> float:
    '''simple docstring'''
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'''[^\d](\d+)$''' ,str(dtype ) )
    if bit_search is None:
        raise ValueError(F'`dtype` is not a valid dtype: {dtype}.' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
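# Example: get_dtype_size(torch.float32) -> 4, get_dtype_size(torch.float16) -> 2,
# get_dtype_size(torch.bool) -> 0.125 (one bit per element).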
def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path ,bloom_config_file ,pytorch_dump_folder_path ,shard_model ,pretraining_tp ):
    '''simple docstring'''
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
if shard_model:
UpperCamelCase__ : Tuple = os.listdir(__lowerCamelCase )
UpperCamelCase__ : Tuple = sorted(filter(lambda __lowerCamelCase : s.startswith('''layer''' ) and "model_00" in s ,__lowerCamelCase ) )
UpperCamelCase__ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Dict = BloomConfig()
for j, file in enumerate(__lowerCamelCase ):
print('''Processing file: {}'''.format(__lowerCamelCase ) )
UpperCamelCase__ : str = None
for i in range(__lowerCamelCase ):
# load all TP files
UpperCamelCase__ : Union[str, Any] = file.replace('''model_00''' ,F'model_0{i}' )
UpperCamelCase__ : List[str] = torch.load(os.path.join(__lowerCamelCase ,__lowerCamelCase ) ,map_location='''cpu''' )
# Rename keys in the transformers names
UpperCamelCase__ : Optional[Any] = list(temp.keys() )
for key in keys:
UpperCamelCase__ : Optional[Any] = temp.pop(__lowerCamelCase )
if tensors is None:
UpperCamelCase__ : Dict = temp
else:
for key in tensors.keys():
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCamelCase__ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCamelCase__ : str = torch.cat([tensors[key], temp[key]] ,dim=__lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCamelCase__ : List[str] = tensors[key] / pretraining_tp
torch.save(
__lowerCamelCase ,os.path.join(
__lowerCamelCase ,'''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) ,str(len(__lowerCamelCase ) ).zfill(5 ) ) ,) ,)
for key in tensors.keys():
UpperCamelCase__ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCamelCase__ : int = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) ,str(len(__lowerCamelCase ) ).zfill(5 ) )
UpperCamelCase__ : Union[str, Any] = BloomConfig()
UpperCamelCase__ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCamelCase__ : Any = total_size
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__lowerCamelCase ,WEIGHTS_NAME + '''.index.json''' ) ,'''w''' ,encoding='''utf-8''' ) as f:
UpperCamelCase__ : List[Any] = json.dumps(__lowerCamelCase ,indent=2 ,sort_keys=__lowerCamelCase ) + '''\n'''
f.write(__lowerCamelCase )
else:
UpperCamelCase__ : Union[str, Any] = BloomModel(__lowerCamelCase )
UpperCamelCase__ : List[str] = os.listdir(__lowerCamelCase )
UpperCamelCase__ : int = sorted(filter(lambda __lowerCamelCase : s.startswith('''layer''' ) and "model_00" in s ,__lowerCamelCase ) )
UpperCamelCase__ : Any = None
for i, file in enumerate(__lowerCamelCase ):
UpperCamelCase__ : List[Any] = None
for i in range(__lowerCamelCase ):
# load all TP files
UpperCamelCase__ : str = file.replace('''model_00''' ,F'model_0{i}' )
UpperCamelCase__ : List[Any] = torch.load(os.path.join(__lowerCamelCase ,__lowerCamelCase ) ,map_location='''cpu''' )
# Rename keys in the transformers names
UpperCamelCase__ : Dict = list(temp.keys() )
for key in keys:
UpperCamelCase__ : Any = temp.pop(__lowerCamelCase )
if tensors is None:
UpperCamelCase__ : Optional[Any] = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCamelCase__ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCamelCase__ : str = torch.cat([tensors[key], temp[key]] ,dim=__lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCamelCase__ : Any = tensors[key] / pretraining_tp
UpperCamelCase__ : Optional[Any] = model.load_state_dict(__lowerCamelCase ,strict=__lowerCamelCase )
assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
UpperCamelCase__ : Optional[int] = set(other_keys.missing_keys )
else:
UpperCamelCase__ : List[str] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase )
UpperCamelCase__ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCamelCase__ : int = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
UpperCamelCase__ : List[str] = model.to(config.torch_dtype )
torch.save(model.state_dict() ,__lowerCamelCase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 344 | 1 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(snake_case )
    def test_attention_outputs( self ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        '''simple docstring'''
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 719 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = """base_with_context"""
def load_notes_encoder(weights , model ):
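    # T5X stores Flax Dense kernels with shape (in_features, out_features); the '.T'
    # below transposes them into torch.nn.Linear's (out_features, in_features) layout.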
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE : Any = weights[F'''layers_{lyr_num}''']
SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : List[Any] = ly_weight['attention']
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights , model ):
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE : Dict = weights[F'''layers_{lyr_num}''']
SCREAMING_SNAKE_CASE : List[Any] = ly_weight['attention']
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE : str = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights , model ):
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
SCREAMING_SNAKE_CASE : Tuple = weights[F'''layers_{lyr_num}''']
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Any = ly_weight['self_attention']
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE : str = ly_weight['MultiHeadDotProductAttention_0']
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args ):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    t5_checkpoint = jnp.tree_util.tree_map(onp.array , t5_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(t5_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(t5_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(t5_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
_lowerCamelCase : Any = parser.parse_args()
    main(args)
| 308 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str , main_target: str ) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
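# Example: evaluate("Helxo Worlx", "Hello World") returns ("Helxo Worlx", 9.0),
# since nine characters already match the target at the same position.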
def crossover(parent_1: str , parent_2: str ) -> tuple[str, str]:
    random_slice = random.randint(0 , len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
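# Single-point crossover: each child keeps one parent's prefix up to a random
# slice index and takes the other parent's tail from that index onwards.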
def mutate(child: str , genes: list[str] ) -> str:
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select(parent_1: tuple[str, float] , population_score: list[tuple[str, float]] , genes: list[str] , ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1, child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
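# Fitter parents (scores are normalized to [0, 1] before selection) spawn
# proportionally more children, capped at 10 per call so the population cannot explode.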
def basic(target: str , genes: list[str] , debug: bool = True ) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = F'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'\nGeneration: {generation}'
                F'\nTotal Population:{total_population}'
                F'\nBest score: {population_score[0][1]}'
                F'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
| 300 |
import os
def solution() -> int:
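    # grid.txt is expected to hold the 20x20 grid of Project Euler problem 11:
    # 20 lines, each with 20 space-separated integers.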
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
        maximum = 0
        # right
        for i in range(20 ):
            for j in range(17 ):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17 ):
            for j in range(20 ):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17 ):
            for j in range(17 ):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17 ):
            for j in range(3 , 20 ):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
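The same four-direction scan, reduced to products of two adjacent numbers in a small made-up grid, makes the indexing easy to verify by hand:

grid = [[8, 2, 22], [97, 38, 15], [0, 40, 0]]
best = 0
for i in range(3):
    for j in range(3):
        if j < 2:  # right
            best = max(best, grid[i][j] * grid[i][j + 1])
        if i < 2:  # down
            best = max(best, grid[i][j] * grid[i + 1][j])
print(best)  # 3686, from 97 * 38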
| 300 | 1 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
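A quick sanity check of the interpolator above: the sample points below lie on the line y = x + 5, so interpolating at x = 5 must give exactly 10.0.

print(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0])  # 10.0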
| 428 | '''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "efficientnet"

    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, dropout_rate=0.5, drop_connect_rate=0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
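A minimal sketch of how a config class like the one above is typically used; this assumes the standard transformers API and builds a randomly initialised (not pretrained) model:

from transformers import EfficientNetConfig, EfficientNetModel

config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0, image_size=224)
model = EfficientNetModel(config)
print(config.hidden_dim)  # 2560 by default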
| 428 | 1 |
import mpmath # for roots of unity
import numpy as np
class lowercase :
def __init__( self : List[str] , _lowercase : Optional[int]=None , _lowercase : Optional[int]=None ):
# Input as list
SCREAMING_SNAKE_CASE__ : List[str] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : Dict = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : int = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : Any = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Any = self.__multiply()
def lowercase__ ( self : Optional[int] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(_lowercase ) <= 1:
return dft[0]
#
SCREAMING_SNAKE_CASE__ : Dict = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : str = [[] for i in range(_lowercase )]
SCREAMING_SNAKE_CASE__ : List[Any] = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Dict = new_dft
SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2
return dft[0]
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Tuple = self.__dft('''A''' )
SCREAMING_SNAKE_CASE__ : int = self.__dft('''B''' )
SCREAMING_SNAKE_CASE__ : int = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : int = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(_lowercase )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : List[str] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Tuple = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Tuple = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[int] = '''A = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : int = '''B = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : List[str] = '''A*B = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
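The FFT product above can be sanity-checked against direct coefficient convolution; numpy's convolve computes the same polynomial product in O(n^2):

import numpy as np

# (1 + 2x + 3x^2) * (3 + 4x) = 3 + 10x + 17x^2 + 12x^3
print(np.convolve([1, 2, 3], [3, 4]))  # [ 3 10 17 12]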
| 35 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = DownBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
def lowerCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
snake_case_ = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ResnetDownsampleBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
def lowerCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AttnDownBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case_ = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = CrossAttnDownBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
def lowerCAmelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
snake_case_ , snake_case_ = super().prepare_init_args_and_inputs_for_common()
snake_case_ = 3_2
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
snake_case_ = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = SimpleCrossAttnDownBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
@property
def lowerCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=_lowerCAmelCase )
def lowerCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = super().prepare_init_args_and_inputs_for_common()
snake_case_ = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def lowerCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = SkipDownBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
snake_case_ = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AttnSkipDownBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
@property
def lowerCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
snake_case_ = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = DownEncoderBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
@property
def lowerCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
snake_case_ = {
"in_channels": 3_2,
"out_channels": 3_2,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
snake_case_ = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AttnDownEncoderBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'down'
@property
def lowerCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
snake_case_ = {
"in_channels": 3_2,
"out_channels": 3_2,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
snake_case_ = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetMidBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'mid'
def lowerCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
snake_case_ = {
"in_channels": 3_2,
"temb_channels": 1_2_8,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetMidBlockaDCrossAttn # noqa F405
_SCREAMING_SNAKE_CASE = 'mid'
def lowerCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = super().prepare_init_args_and_inputs_for_common()
snake_case_ = 3_2
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
snake_case_ = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetMidBlockaDSimpleCrossAttn # noqa F405
_SCREAMING_SNAKE_CASE = 'mid'
@property
def lowerCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = super().prepare_init_args_and_inputs_for_common()
snake_case_ = 3_2
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UpBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ResnetUpsampleBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = CrossAttnUpBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = super().prepare_init_args_and_inputs_for_common()
snake_case_ = 3_2
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = SimpleCrossAttnUpBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase , include_encoder_hidden_states=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
snake_case_ , snake_case_ = super().prepare_init_args_and_inputs_for_common()
snake_case_ = 3_2
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AttnUpBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def lowerCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = SkipUpBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
snake_case_ = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AttnSkipUpBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
snake_case_ = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UpDecoderBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
snake_case_ = {"in_channels": 3_2, "out_channels": 3_2}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
snake_case_ = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(_lowerCAmelCase )
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = AttnUpDecoderBlockaD # noqa F405
_SCREAMING_SNAKE_CASE = 'up'
@property
def lowerCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_temb=_lowerCAmelCase )
def lowerCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
snake_case_ = {"in_channels": 3_2, "out_channels": 3_2}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
super().test_output(_lowerCAmelCase )
| 283 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
'''simple docstring'''
@deprecated(
__A , """Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" , )
def __init__( self : Optional[int] , __A : Optional[Any] , __A : List[Any] , __A : Tuple=False , __A : str=False , __A : Any=dtypes.floataa , __A : Union[str, Any]=True , __A : Optional[int]=None , ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ ,lowerCAmelCase__ = random_seed.get_seed(__A )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCAmelCase__ = dtypes.as_dtype(__A ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
lowerCAmelCase__ = 1_0000
lowerCAmelCase__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
lowerCAmelCase__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCAmelCase__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCAmelCase__ = images.astype(numpy.floataa )
lowerCAmelCase__ = numpy.multiply(__A , 1.0 / 2_5_5.0 )
lowerCAmelCase__ = images
lowerCAmelCase__ = labels
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
@property
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return self._images
@property
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return self._labels
@property
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self._num_examples
@property
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return self._epochs_completed
def lowercase__ ( self : Optional[Any] , __A : Union[str, Any] , __A : Any=False , __A : List[Any]=True ) -> Optional[int]:
'''simple docstring'''
if fake_data:
lowerCAmelCase__ = [1] * 784
lowerCAmelCase__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__A )],
[fake_label for _ in range(__A )],
)
lowerCAmelCase__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__A )
lowerCAmelCase__ = self.images[perma]
lowerCAmelCase__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase__ = self._num_examples - start
lowerCAmelCase__ = self._images[start : self._num_examples]
lowerCAmelCase__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__A )
lowerCAmelCase__ = self.images[perm]
lowerCAmelCase__ = self.labels[perm]
# Start next epoch
lowerCAmelCase__ = 0
lowerCAmelCase__ = batch_size - rest_num_examples
lowerCAmelCase__ = self._index_in_epoch
lowerCAmelCase__ = self._images[start:end]
lowerCAmelCase__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(UpperCAmelCase_ , """Please write your own downloading logic.""" )
def _lowerCAmelCase( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ) -> int:
if not gfile.Exists(UpperCAmelCase_ ):
gfile.MakeDirs(UpperCAmelCase_ )
lowerCAmelCase__ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
if not gfile.Exists(UpperCAmelCase_ ):
urllib.request.urlretrieve(UpperCAmelCase_ , UpperCAmelCase_ ) # noqa: S310
with gfile.GFile(UpperCAmelCase_ ) as f:
lowerCAmelCase__ = f.size()
print("""Successfully downloaded""" , UpperCAmelCase_ , UpperCAmelCase_ , """bytes.""" )
return filepath
@deprecated(
UpperCAmelCase_ , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def _lowerCAmelCase( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Dict=dtypes.floataa , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Any=5000 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=DEFAULT_SOURCE_URL , ) -> List[str]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=UpperCAmelCase_ , one_hot=UpperCAmelCase_ , dtype=UpperCAmelCase_ , seed=UpperCAmelCase_ )
lowerCAmelCase__ = fake()
lowerCAmelCase__ = fake()
lowerCAmelCase__ = fake()
return _Datasets(train=UpperCAmelCase_ , validation=UpperCAmelCase_ , test=UpperCAmelCase_ )
if not source_url: # empty string check
lowerCAmelCase__ = DEFAULT_SOURCE_URL
lowerCAmelCase__ = """train-images-idx3-ubyte.gz"""
lowerCAmelCase__ = """train-labels-idx1-ubyte.gz"""
lowerCAmelCase__ = """t10k-images-idx3-ubyte.gz"""
lowerCAmelCase__ = """t10k-labels-idx1-ubyte.gz"""
lowerCAmelCase__ = _maybe_download(
UpperCAmelCase_ , UpperCAmelCase_ , source_url + train_images_file )
with gfile.Open(UpperCAmelCase_ , """rb""" ) as f:
lowerCAmelCase__ = _extract_images(UpperCAmelCase_ )
lowerCAmelCase__ = _maybe_download(
UpperCAmelCase_ , UpperCAmelCase_ , source_url + train_labels_file )
with gfile.Open(UpperCAmelCase_ , """rb""" ) as f:
lowerCAmelCase__ = _extract_labels(UpperCAmelCase_ , one_hot=UpperCAmelCase_ )
lowerCAmelCase__ = _maybe_download(
UpperCAmelCase_ , UpperCAmelCase_ , source_url + test_images_file )
with gfile.Open(UpperCAmelCase_ , """rb""" ) as f:
lowerCAmelCase__ = _extract_images(UpperCAmelCase_ )
lowerCAmelCase__ = _maybe_download(
UpperCAmelCase_ , UpperCAmelCase_ , source_url + test_labels_file )
with gfile.Open(UpperCAmelCase_ , """rb""" ) as f:
lowerCAmelCase__ = _extract_labels(UpperCAmelCase_ , one_hot=UpperCAmelCase_ )
if not 0 <= validation_size <= len(UpperCAmelCase_ ):
lowerCAmelCase__ = (
"""Validation size should be between 0 and """
F'''{len(UpperCAmelCase_ )}. Received: {validation_size}.'''
)
raise ValueError(UpperCAmelCase_ )
lowerCAmelCase__ = train_images[:validation_size]
lowerCAmelCase__ = train_labels[:validation_size]
lowerCAmelCase__ = train_images[validation_size:]
lowerCAmelCase__ = train_labels[validation_size:]
lowerCAmelCase__ = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
lowerCAmelCase__ = _DataSet(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase__ = _DataSet(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase__ = _DataSet(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
return _Datasets(train=UpperCAmelCase_ , validation=UpperCAmelCase_ , test=UpperCAmelCase_ )
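The epoch bookkeeping in next_batch above reduces to a small pattern: reshuffle once per epoch, then hand out contiguous slices. The original additionally stitches an epoch's leftover tail onto the next epoch's head so every batch stays full; this standalone sketch keeps only the per-epoch shuffle.

import numpy as np

def batches(data, batch_size, epochs, rng=None):
    rng = rng or np.random.default_rng(0)
    for _ in range(epochs):
        order = rng.permutation(len(data))  # new shuffle each epoch
        for start in range(0, len(data), batch_size):
            yield data[order[start : start + batch_size]]

for batch in batches(np.arange(10), 4, 1):
    print(batch)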
| 211 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
    args = parser.parse_args()
zeller(args.date_input)
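Example run (January 31, 2010 fell on a Sunday, which the routine also cross-checks against datetime internally before answering):

print(zeller('01-31-2010'))  # Your date 01-31-2010, is a Sunday!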
| 211 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
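A quick round-trip through the helpers above; the values are easy to verify against the regexes:

print(camelcase_to_snakecase('SquadV2Dataset'))              # squad_v2_dataset
print(snakecase_to_camelcase('squad_v2_dataset'))            # SquadV2Dataset
print(filename_prefix_for_split('SquadV2Dataset', 'train'))  # squad_v2_dataset-train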
| 676 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
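A smaller instance makes the search easy to check by hand: below 100, the longest run of consecutive primes with a prime sum is 2 + 3 + 5 + 7 + 11 + 13 = 41.

print(solution(100))  # 41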
| 676 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl"):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
_lowercase , _lowercase =generate_datasets(
A__ , A__ , number=A__ , min_len=1026 , trim=A__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_lowercase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
_lowercase =load_gpta('gpt2' ).to(A__ )
print('computing perplexity on objective set' )
_lowercase =compute_perplexity(A__ , A__ , A__ ).item()
print('perplexity on objective set:' , A__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt"):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
_lowercase =GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
_lowercase =SecondaryLearner(A__ )
# Train secondary learner
_lowercase =train_secondary_learner(
A__ , A__ , max_epochs=A__ , batch_size=A__ , eval_freq=100 , igf_model_path=A__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt"):
"""simple docstring"""
_lowercase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
_lowercase =RandomSampler(A__ )
_lowercase =DataLoader(A__ , sampler=A__ )
_lowercase =max_steps // (len(A__ )) + 1
_lowercase =0
_lowercase =torch.zeros((1, context_len) , dtype=torch.long , device=A__ )
_lowercase , _lowercase , _lowercase =recopy_model(A__ , A__ , A__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(A__ )
secondary_learner.eval()
_lowercase =[]
_lowercase =0
_lowercase =[]
_lowercase =[]
# Compute the performance of the transformer model at the beginning
_lowercase =compute_perplexity(A__ , A__ , A__ )
test_perps.append(A__ )
print('Test perplexity, step' , A__ , ':' , A__ )
for epoch in range(int(A__ ) ):
for step, example in enumerate(A__ ):
torch.cuda.empty_cache()
_lowercase =random.randint(0 , example.size(2 ) - context_len - 1 )
_lowercase =example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_lowercase =model(A__ , labels=A__ )
_lowercase =True
if secondary_learner is not None:
_lowercase =secondary_learner.forward(
torch.tensor(A__ , dtype=torch.long , device=A__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(A__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_lowercase =-1
if predicted_q < threshold:
_lowercase =False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_lowercase =outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_lowercase =0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_lowercase =compute_perplexity(A__ , A__ , A__ )
test_perps.append(A__ )
print('Test perplexity, step' , A__ , ':' , A__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , A__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
"""simple docstring"""
_lowercase =argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=A__ , type=A__ , required=A__ , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=A__ , type=A__ , required=A__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=A__ , default=A__ , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=A__ , default=A__ , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=A__ , type=A__ , required=A__ , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=A__ , type=A__ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=A__ , default=A__ , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=A__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=A__ , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=A__ , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=1000 , type=A__ , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=A__ , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=A__ , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=A__ , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=A__ , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=1026 , type=A__ , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=A__ , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=A__ , type=A__ , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=A__ , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=A__ , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=A__ , type=A__ , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=A__ , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
_lowercase =joblib.load('data/IGF_values.jbl' )
# Train secondary learner
_lowercase =training_secondary_learner(
A__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
_lowercase =GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_lowercase , _lowercase =generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=A__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
A__ , A__ , A__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=A__ , secondary_learner=A__ , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 380 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 380 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
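Typical use of the processor above (assumes the usual "openai/whisper-tiny" checkpoint name and network access; a second of silence stands in for real audio, and the feature shape is what whisper-tiny's log-mel extractor is expected to produce):

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained('openai/whisper-tiny')
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors='pt')
print(inputs['input_features'].shape)  # torch.Size([1, 80, 3000])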
| 93 | '''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase_ )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowercase_ )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowercase_ )
lowercase_ = len(lowercase_ )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = self.dummy_sample_deter + 0.1
lowercase_ = self.dummy_sample_deter - 0.1
lowercase_ = samplea.shape[0]
lowercase_ = torch.stack([samplea, samplea, samplea] , dim=0 )
lowercase_ = torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ )
lowercase_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowercase_ = scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
lowercase_ = torch.sum(torch.abs(lowercase_ ) )
lowercase_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowercase_ )
lowercase_ = len(lowercase_ )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
lowercase_ = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(lowercase_ ) )
lowercase_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowercase_ = scheduler_class(**lowercase_ )
lowercase_ = len(lowercase_ )
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter
lowercase_ = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
lowercase_ = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
lowercase_ = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(lowercase_ ) )
lowercase_ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowercase_ )
lowercase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowercase_ )
lowercase_ = scheduler.timesteps
for i, timestep in enumerate(lowercase_ ):
if i == len(lowercase_ ) - 1:
lowercase_ = -1
else:
lowercase_ = timesteps[i + 1]
lowercase_ = scheduler.previous_timestep(lowercase_ )
lowercase_ = prev_t.item()
self.assertEqual(lowercase_ , lowercase_ )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowercase_ )
lowercase_ = [100, 87, 50, 51, 0]
with self.assertRaises(lowercase_ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowercase_ )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowercase_ )
lowercase_ = [100, 87, 50, 1, 0]
lowercase_ = len(lowercase_ )
with self.assertRaises(lowercase_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowercase_ )
lowercase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowercase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowercase_ )
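Outside the test harness, the scheduler under test is driven like any other diffusers scheduler; a minimal sketch assuming the standard API:

from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(50)
print(scheduler.timesteps[:3])  # the first few (descending) timesteps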
| 451 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum) -> None:
    # Prune: the current path already overshoots, or even taking every
    # remaining number cannot reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index])


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
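A second worked instance of the same search, small enough to check by hand:

print(*generate_sum_of_subsets_soln([1, 2, 3, 4], 5))  # [1, 4] [2, 3]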
| 670 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
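Hypothetical invocation of the script above (the file name and all paths are placeholders):

# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /tmp/t5/model.ckpt \
#     --config_file /tmp/t5/config.json \
#     --pytorch_dump_path /tmp/t5_pt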
| 670 | 1 |