| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
---|---|---|---|---|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # Relax generation constraints so the beam-search trace is exportable.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
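# Illustrative sketch of the same export-and-verify pattern on a toy module
# (the module, file name, and tolerances below are placeholders, not part of
# the script above).
import numpy as np
import onnxruntime
import torch


class TinyModel(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) * 2.0


tiny = TinyModel().eval()
dummy = torch.randn(1, 4)
# Export with a dynamic batch axis, mirroring the dynamic_axes usage above.
torch.onnx.export(
    tiny,
    (dummy,),
    "tiny.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
)
# Run the exported graph and compare against eager PyTorch.
sess = onnxruntime.InferenceSession("tiny.onnx", providers=["CPUExecutionProvider"])
(ort_y,) = sess.run(None, {"x": dummy.numpy()})
np.testing.assert_allclose(tiny(dummy).detach().numpy(), ort_y, rtol=1e-3, atol=1e-3)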
| 694 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter in direct form I, following the standard a/b
    coefficient convention of the difference equation."""

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the histories and store the newest sample/output at index 0.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
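# Usage sketch (coefficients are illustrative): a first-order averaging filter
# y[n] = 0.5*x[n] + 0.5*x[n-1]; its step response settles at 1.0.
filt = IIRFilter(1)
filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
print([filt.process(x) for x in [1.0, 1.0, 1.0, 1.0]])  # [0.5, 1.0, 1.0, 1.0]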
| 694 | 1 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term to normalize leading terms.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # If any row contains a zero, move one equation with no zero coefficients
    # to the front so elimination can start from a full row.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (shortest) equation upward.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
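# Illustrative cross-check (assumes numpy is available): solve the same system
# directly; the expected solution is [-1, 0, 1, 2, 3].
import numpy as np

coefficients = np.array([row[:-1] for row in eq], dtype=float)
constants = np.array([row[-1] for row in eq], dtype=float)
print(np.linalg.solve(coefficients, constants))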
| 694 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # A node already settled from the opposite direction closes a candidate path.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
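# Illustrative cross-check: a plain one-directional Dijkstra over graph_fwd
# (reusing the imports above) should also report distance 3 for E -> F.
def plain_dijkstra(graph: dict, source: str, destination: str) -> float:
    dist = {source: 0}
    pq: PriorityQueue[Any] = PriorityQueue()
    pq.put((0, source))
    while not pq.empty():
        d, v = pq.get()
        if v == destination:
            return d
        for nxt, w in graph.get(v, []):
            if d + w < dist.get(nxt, np.inf):
                dist[nxt] = d + w
                pq.put((d + w, nxt))
    return -1


print(plain_dijkstra(graph_fwd, "E", "F"))  # 3, via E -> G -> F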
| 694 | 1 |
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
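# Quick comparison with Python's built-in shift operators (illustrative):
print(logical_left_shift(13, 1))       # '0b11010', same value as 13 << 1 == 26
print(logical_right_shift(13, 1))      # '0b110', same value as 13 >> 1 == 6
print(arithmetic_right_shift(-13, 1))  # '0b11001': the sign bit is replicated, like -13 >> 1 == -7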
| 694 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    # Sum the digits of num!.
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
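# Worked check: 10! = 3628800, whose digits sum to 27.
assert solution(10) == 27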
| 694 | 1 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    # Count positive integers that are both an n-digit number and an nth power.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 694 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    # Slide the pattern over s and record every index where all characters match.
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
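# Independent cross-check of the expected positions [4, 10, 18] with a regex scan:
import re

print([m.start() for m in re.finditer("ABC", "ABAAABCDBBABCDDEBCABC")])  # [4, 10, 18]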
| 694 | 1 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A__: Tuple = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    # Wrap mutable list defaults in a factory so dataclass instances do not share state.
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
__UpperCamelCase : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__UpperCamelCase : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__UpperCamelCase : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
__UpperCamelCase : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__UpperCamelCase : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
__UpperCamelCase : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
__UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 694 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple:
'''simple docstring'''
_a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8}
_a : int =parent
_a : Optional[int] =batch_size
_a : List[str] =num_channels
_a : Optional[Any] =image_size
_a : int =min_resolution
_a : str =max_resolution
_a : str =do_resize
_a : Tuple =size
_a : Tuple =do_normalize
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
__UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
_a : Any =ImageGPTImageProcessingTester(self )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
_a : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) )
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
_a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
_a : Dict =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) )
else:
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : List[Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Any =os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict()
_a : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict()
_a : Union[str, Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_a : int =prepare_images()
# test non-batched
_a : Dict =image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
_a : Optional[int] =[3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE )
# test batched
_a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
_a : Any =[3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
| 694 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
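# Quick illustration of the helpers above (inputs are made-up names):
print(camelcase_to_snakecase("MyDataset"))              # 'my_dataset'
print(snakecase_to_camelcase("my_dataset"))             # 'MyDataset'
print(filename_prefix_for_split("MyDataset", "train"))  # 'my_dataset-train'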
| 694 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
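# Usage sketch: the subset {4, 5} sums to 9, so this should print True.
print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))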
| 694 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
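# Illustrative invocation (the script name and all paths are placeholders):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/tf_checkpoint \
#     --rembert_config_file /path/to/rembert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin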
| 694 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    # Sum the even-valued Fibonacci terms not exceeding n.
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 1 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
A__: str = '''bert-base-cased'''
A__: Dict = '''google/pegasus-xsum'''
A__: int = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
A__: str = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
A__: Optional[int] = '''patrickvonplaten/t5-tiny-random'''
A__: Union[str, Any] = '''sshleifer/bart-tiny-random'''
A__: List[Any] = '''sshleifer/tiny-mbart'''
A__: str = '''sshleifer/tiny-marian-en-de'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Path ,_UpperCAmelCase : list ) -> Tuple:
_a : Optional[Any] ="""\n""".join(_UpperCAmelCase )
Path(_UpperCAmelCase ).open("""w""" ).writelines(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_UpperCAmelCase ,F"{split}.source" ) ,_UpperCAmelCase )
_dump_articles(os.path.join(_UpperCAmelCase ,F"{split}.target" ) ,_UpperCAmelCase )
return tmp_dir
class A__ ( UpperCAmelCase__ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Any ) -> Union[str, Any]:
'''simple docstring'''
_a : int =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
_a : int =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a : Any =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
_a : Optional[int] =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
_a : Dict =4
_a : Tuple =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_a , _a : Optional[int] ="""ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
_a : str =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=SCREAMING_SNAKE_CASE , max_target_length=SCREAMING_SNAKE_CASE , src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE , )
_a : Any =DataLoader(SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_a : List[Any] =shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[str] ) -> Dict:
'''simple docstring'''
_a : str =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
_a : int =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a : Optional[Any] =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
_a : List[str] =max(len(tokenizer.encode(SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
_a : Optional[Any] =4
_a : int =LegacySeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=2_0 , max_target_length=SCREAMING_SNAKE_CASE , )
_a : Union[str, Any] =DataLoader(SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
_a : int =AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
_a : Any =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_a : Any =tmp_dir.joinpath("""train.source""" ).open().readlines()
_a : Optional[int] =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1_2_8 , SCREAMING_SNAKE_CASE )
_a : List[str] ={x.name for x in tmp_dir.iterdir()}
_a : Dict ={x.name for x in save_dir.iterdir()}
_a : str =save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(SCREAMING_SNAKE_CASE ) < len(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == 1
assert len(packed_examples[0] ) == sum(len(SCREAMING_SNAKE_CASE ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_a , _a , _a : Dict =self._get_dataset(max_len=6_4 )
_a : Any =6_4
_a : str =ds.make_dynamic_sampler(SCREAMING_SNAKE_CASE , required_batch_size_multiple=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =[len(SCREAMING_SNAKE_CASE ) for x in batch_sampler]
assert len(set(SCREAMING_SNAKE_CASE ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE ) # no dropped or added examples
_a : Any =DataLoader(SCREAMING_SNAKE_CASE , batch_sampler=SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
_a : Union[str, Any] =[]
_a : str =[]
for batch in data_loader:
_a : Dict =batch["""input_ids"""].shape
_a : Optional[Any] =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_a : str =np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(SCREAMING_SNAKE_CASE )
if num_src_tokens > (max_tokens * 1.1):
failures.append(SCREAMING_SNAKE_CASE )
assert num_src_per_batch[0] == max(SCREAMING_SNAKE_CASE )
if failures:
raise AssertionError(f"too many tokens in {len(SCREAMING_SNAKE_CASE )} batches" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
_a , _a , _a : Tuple =self._get_dataset(max_len=5_1_2 )
_a : Union[str, Any] =2
_a : str =ds.make_sortish_sampler(SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE )
_a : Dict =DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
_a : List[str] =DataLoader(SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 , sampler=SCREAMING_SNAKE_CASE )
_a : Any =tokenizer.pad_token_id
def count_pad_tokens(SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple="input_ids" ):
return [batch[k].eq(SCREAMING_SNAKE_CASE ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE , k="""labels""" ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE , k="""labels""" ) )
assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE ) )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Any=1_0_0_0 , SCREAMING_SNAKE_CASE :Any=1_2_8 ) -> List[Any]:
'''simple docstring'''
if os.getenv("""USE_REAL_DATA""" , SCREAMING_SNAKE_CASE ):
_a : str ="""examples/seq2seq/wmt_en_ro"""
_a : List[Any] =max_len * 2 * 6_4
if not Path(SCREAMING_SNAKE_CASE ).joinpath("""train.len""" ).exists():
save_len_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
_a : str ="""examples/seq2seq/test_data/wmt_en_ro"""
_a : List[Any] =max_len * 4
save_len_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Tuple =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=SCREAMING_SNAKE_CASE , max_target_length=SCREAMING_SNAKE_CASE , n_obs=SCREAMING_SNAKE_CASE , )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a , _a , _a : Union[str, Any] =self._get_dataset()
_a : Tuple =set(DistributedSortishSampler(SCREAMING_SNAKE_CASE , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=SCREAMING_SNAKE_CASE ) )
_a : List[str] =set(DistributedSortishSampler(SCREAMING_SNAKE_CASE , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=SCREAMING_SNAKE_CASE ) )
assert idsa.intersection(SCREAMING_SNAKE_CASE ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Any =AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
if tok_name == MBART_TINY:
_a : List[str] =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
_a : Optional[int] =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_a : Tuple =SeqaSeqDataset(
SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
_a : int =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(SCREAMING_SNAKE_CASE ) == 1 if tok_name == BART_TINY else len(SCREAMING_SNAKE_CASE ) == 0
| 694 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
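# Worked check: F(12) = 144 is the first Fibonacci number with three digits.
assert solution(3) == 12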
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase : int = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase : Any = False
__UpperCamelCase : List[Any] = False
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[int]=False ) -> int:
'''simple docstring'''
_a : List[str] =super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
_a : Optional[Any] =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class TFMobileBertModelTester:
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int]=1_3 , SCREAMING_SNAKE_CASE :Union[str, Any]=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :Union[str, Any]=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :str=9_9 , SCREAMING_SNAKE_CASE :Dict=3_2 , SCREAMING_SNAKE_CASE :Optional[Any]=3_2 , SCREAMING_SNAKE_CASE :Any=2 , SCREAMING_SNAKE_CASE :Optional[int]=4 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE :List[str]="gelu" , SCREAMING_SNAKE_CASE :str=0.1 , SCREAMING_SNAKE_CASE :str=0.1 , SCREAMING_SNAKE_CASE :Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE :str=1_6 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Any=0.02 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Optional[Any]=4 , SCREAMING_SNAKE_CASE :int=None , ) -> Union[str, Any]:
'''simple docstring'''
_a : Any =parent
_a : Dict =batch_size
_a : Optional[int] =seq_length
_a : List[Any] =is_training
_a : int =use_input_mask
_a : Tuple =use_token_type_ids
_a : Optional[Any] =use_labels
_a : Optional[int] =vocab_size
_a : Tuple =hidden_size
_a : Any =num_hidden_layers
_a : int =num_attention_heads
_a : List[str] =intermediate_size
_a : int =hidden_act
_a : int =hidden_dropout_prob
_a : Tuple =attention_probs_dropout_prob
_a : Dict =max_position_embeddings
_a : int =type_vocab_size
_a : str =type_sequence_label_size
_a : Any =initializer_range
_a : List[Any] =num_labels
_a : int =num_choices
_a : int =scope
_a : str =embedding_size
def __UpperCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
_a : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict =None
if self.use_input_mask:
_a : List[Any] =random_attention_mask([self.batch_size, self.seq_length] )
_a : Any =None
if self.use_token_type_ids:
_a : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : List[str] =None
_a : Union[str, Any] =None
_a : Optional[int] =None
if self.use_labels:
_a : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Any =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : int =ids_tensor([self.batch_size] , self.num_choices )
_a : str =MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : Optional[int] =TFMobileBertModel(config=SCREAMING_SNAKE_CASE )
_a : str ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : List[Any] =model(SCREAMING_SNAKE_CASE )
_a : List[str] =[input_ids, input_mask]
_a : List[str] =model(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Tuple ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[int] =TFMobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE )
_a : Dict ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : List[str] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Tuple ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[int] =TFMobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE )
_a : List[str] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : List[str] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Tuple ) -> Any:
'''simple docstring'''
_a : int =TFMobileBertForPreTraining(config=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Tuple =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =self.num_labels
_a : Any =TFMobileBertForSequenceClassification(config=SCREAMING_SNAKE_CASE )
_a : Any ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : List[Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Optional[Any]:
'''simple docstring'''
_a : Any =self.num_choices
_a : Union[str, Any] =TFMobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
_a : Optional[Any] =tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_a : str =tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_a : Union[str, Any] =tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_a : Optional[Any] ={
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_a : List[str] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Dict ) -> str:
'''simple docstring'''
_a : List[Any] =self.num_labels
_a : Tuple =TFMobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Dict =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Tuple ) -> Dict:
'''simple docstring'''
_a : List[Any] =TFMobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
_a : List[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Tuple =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp( self ) -> List[Any]:
        '''simple docstring'''
        self.model_tester =TFMobileBertModelTest.TFMobileBertModelTester(self )
        # MobileBertConfig comes from this test file's imports (not shown in this excerpt)
        self.config_tester =ConfigTester(self , config_class=MobileBertConfig , hidden_size=3_7 )
    def test_config( self ) -> List[str]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
        '''simple docstring'''
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model =TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class A__ ( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ) -> Any:
        '''simple docstring'''
        model =TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
        input_ids =tf.constant([[0, 1, 2, 3, 4, 5]] )
        output =model(input_ids )[0]
        expected_shape =[1, 6, 3_0_5_2_2]
        self.assertEqual(output.shape , expected_shape )
        expected_slice =tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
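        # Assumed reading aid (not part of the original test): the 1x3x3 block
        # above holds reference pretraining logits for the first 3 tokens over
        # the first 3 vocab entries, compared against output[:, :3, :3] below.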
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 694 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ) -> None:
    # Initialise PyTorch model
    config =RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model =RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 694 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__( self ) -> None:
        '''simple docstring'''
        self.first_signal =[2, 1, 2, -1]
        self.second_signal =[1, 2, 3, 4]
    def circular_convolution( self ) -> list[float]:
        '''simple docstring'''
        length_first_signal =len(self.first_signal )
        length_second_signal =len(self.second_signal )
        max_length =max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix =[[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal =deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal =np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
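def _demo_circular_convolution() -> None:
    # Assumed usage sketch (not part of the original file): with the default
    # signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular convolution
    # y[k] = sum_j x1[j] * x2[(k - j) % n] evaluates to [10.0, 10.0, 6.0, 14.0].
    print(CircularConvolution().circular_convolution())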
if __name__ == "__main__":
doctest.testmod()
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1024}
class BartphoTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
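# Assumed usage sketch (not part of the original file; needs the
# `sentencepiece` package and network access), mirroring the BARTpho docs:
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]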
| 694 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/trocr-base-handwritten''': (
        '''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig( PretrainedConfig ):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__( self , vocab_size=5_0_2_6_5 , d_model=1_0_2_4 , decoder_layers=1_2 , decoder_attention_heads=1_6 , decoder_ffn_dim=4_0_9_6 , activation_function="gelu" , max_position_embeddings=5_1_2 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size =vocab_size
        self.d_model =d_model
        self.decoder_layers =decoder_layers
        self.decoder_attention_heads =decoder_attention_heads
        self.decoder_ffn_dim =decoder_ffn_dim
        self.activation_function =activation_function
        self.max_position_embeddings =max_position_embeddings
        self.dropout =dropout
        self.attention_dropout =attention_dropout
        self.activation_dropout =activation_dropout
        self.init_std =init_std
        self.decoder_layerdrop =decoder_layerdrop
        self.use_cache =use_cache
        self.scale_embedding =scale_embedding
        self.use_learned_position_embeddings =use_learned_position_embeddings
        self.layernorm_embedding =layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
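def _demo_trocr_config() -> None:
    # Assumed quick check (not part of the original file): with no arguments,
    # the config reproduces the base TrOCR decoder settings.
    config = TrOCRConfig()
    print(config.d_model , config.decoder_layers , config.decoder_attention_heads )  # -> 1024 12 16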
| 694 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
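# Assumed usage sketch for one of the helpers re-exported above (not part of
# this file): `find_executable_batch_size` re-runs a training function with a
# halved batch size whenever it raises an out-of-memory error.
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop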
| 694 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ) -> List[Any]:
        '''simple docstring'''
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size =size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3}
        self.parent =parent
        self.batch_size =batch_size
        self.num_channels =num_channels
        self.min_resolution =min_resolution
        self.max_resolution =max_resolution
        self.do_resize =do_resize
        self.size =size
        self.do_normalize =do_normalize
        self.image_mean =image_mean
        self.image_std =image_std
        self.do_rescale =do_rescale
        self.rescale_factor =rescale_factor
        self.do_pad =do_pad
    def prepare_image_processor_dict( self ) -> dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ) -> Optional[Any]:
        '''simple docstring'''
        if not batched:
            image =image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h =image.size
            else:
                h , w =image.shape[1], image.shape[2]
            if w < h:
                expected_height =int(self.size["""shortest_edge"""] * h / w )
                expected_width =self.size["""shortest_edge"""]
            elif w > h:
                expected_height =self.size["""shortest_edge"""]
                expected_width =int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height =self.size["""shortest_edge"""]
                expected_width =self.size["""shortest_edge"""]
        else:
            expected_values =[]
            for image in image_inputs:
                expected_height , expected_width =self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height =max(expected_values , key=lambda item : item[0] )[0]
            expected_width =max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
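    # Assumed worked example (not part of the original tests): with
    # size={"shortest_edge": 18}, a 300x400 (w x h) PIL image has w < h, so
    # get_expected_values returns height int(18 * 400 / 300) = 24 and width 18.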
@require_torch
@require_vision
class YolosImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.image_processor_tester =YolosImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :str ) -> str:
'''simple docstring'''
_a : Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE )
_a : Dict =self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int ) -> Any:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
# Initialize image_processing
_a : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_a : int =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_a , _a : Optional[Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a , _a : Any =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
_a : List[str] =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
# Initialize image_processing
_a : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_a : Any =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_a , _a : Optional[Any] =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
_a , _a : Dict =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
# Initialize image_processing
_a : str =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_a : Union[str, Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_a , _a : str =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a : Union[str, Any] =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
_a , _a : Dict =self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
# Initialize image_processings
_a : Optional[int] =self.image_processing_class(**self.image_processor_dict )
_a : str =self.image_processing_class(do_resize=SCREAMING_SNAKE_CASE , do_normalize=SCREAMING_SNAKE_CASE , do_rescale=SCREAMING_SNAKE_CASE )
# create random PyTorch tensors
_a : Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_a : Tuple =image_processing_a.pad(SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
_a : List[str] =image_processing_a(SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
# prepare image and target
_a : Optional[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_a : List[Any] =json.loads(f.read() )
_a : int ={"""image_id""": 3_9_7_6_9, """annotations""": target}
# encode them
_a : Any =YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
_a : Any =image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
_a : List[str] =torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE )
_a : Optional[int] =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_a : Union[str, Any] =torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE ) )
# verify boxes
_a : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE )
_a : str =torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_a : Dict =torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_a : Optional[Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
_a : Any =torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE ) )
# verify orig_size
_a : Optional[int] =torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE ) )
# verify size
_a : int =torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE ) )
@slow
def __UpperCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
# prepare image, target and masks_path
_a : Dict =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_a : int =json.loads(f.read() )
_a : Dict ={"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target}
_a : Optional[int] =pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_a : List[Any] =YolosImageProcessor(format="""coco_panoptic""" )
_a : Optional[Any] =image_processing(images=SCREAMING_SNAKE_CASE , annotations=SCREAMING_SNAKE_CASE , masks_path=SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
# verify pixel values
_a : List[Any] =torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , SCREAMING_SNAKE_CASE )
_a : List[str] =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_a : Union[str, Any] =torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , SCREAMING_SNAKE_CASE ) )
# verify boxes
_a : Union[str, Any] =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , SCREAMING_SNAKE_CASE )
_a : Optional[Any] =torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_a : List[str] =torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_a : Union[str, Any] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , SCREAMING_SNAKE_CASE ) )
# verify class_labels
_a : str =torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , SCREAMING_SNAKE_CASE ) )
# verify masks
_a : Optional[int] =8_2_2_8_7_3
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , SCREAMING_SNAKE_CASE )
# verify orig_size
_a : List[Any] =torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , SCREAMING_SNAKE_CASE ) )
# verify size
_a : List[str] =torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , SCREAMING_SNAKE_CASE ) )
| 694 |
'''simple docstring'''
def simplify( current_set : list[list] ) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set =current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude =row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] =column
                continue
            current_set[row_index][column_index] =column / magnitude
    # Subtract to cancel term
    first_row =current_set[0]
    final_set =[first_row]
    current_set =current_set[1::]
    for row in current_set:
        temp_row =[]
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row =final_set[0]
        current_first_column =[]
        next_iteration =[]
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant =simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , current_first_row )
        final_set =resultant
    return final_set
def solve_simultaneous( equations : list[list] ) -> list:
    if len(equations ) == 0:
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    _length =len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError("""solve_simultaneous() requires lists of integers""" )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set =equations.copy()
    if any(0 in row for row in data_set ):
        temp_data =data_set.copy()
        full_row =[]
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row =data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
        data_set.insert(0 , full_row )
    useable_form =data_set.copy()
    simplified =simplify(useable_form )
    simplified =simplified[::-1]
    solutions : list =[]
    for row in simplified:
        current_solution =row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row =row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row =temp_row[1::]
        temp_row =temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final =[]
    for item in solutions:
        final.append(float(round(item , 5 ) ) )
    return final[::-1]
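def _demo_solve_simultaneous() -> None:
    # Assumed worked example (not part of the original file): the system
    #   x + 2y = 3
    #   4x + 5y = 6
    # has the unique solution x = -1, y = 2.
    print(solve_simultaneous([[1, 2, 3], [4, 5, 6]]))  # -> [-1.0, 2.0]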
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 694 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
    }
}
class CpmTokenizer( PreTrainedTokenizer ):
def __init__( self :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict=False , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :int=False , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Dict="</s>" , SCREAMING_SNAKE_CASE :List[Any]="<unk>" , SCREAMING_SNAKE_CASE :Tuple="<sep>" , SCREAMING_SNAKE_CASE :Optional[int]="<pad>" , SCREAMING_SNAKE_CASE :List[Any]="<cls>" , SCREAMING_SNAKE_CASE :str="<mask>" , SCREAMING_SNAKE_CASE :List[str]=["<eop>", "<eod>"] , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
_a : Tuple =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : int =3
_a : str =do_lower_case
_a : Union[str, Any] =remove_space
_a : str =keep_accents
_a : int =vocab_file
_a : Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_a : Optional[int] =jieba
_a : List[Any] =str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __UpperCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
return len(self.sp_model )
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :str ) -> Dict:
'''simple docstring'''
_a : Union[str, Any] =self.__dict__.copy()
_a : str =None
return state
def __setstate__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] ) -> str:
'''simple docstring'''
_a : Optional[int] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : List[str] ={}
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :List[str] ) -> Tuple:
'''simple docstring'''
if self.remove_space:
_a : Tuple =""" """.join(inputs.strip().split() )
else:
_a : Dict =inputs
_a : Dict =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_a : Optional[Any] =unicodedata.normalize("""NFKD""" , SCREAMING_SNAKE_CASE )
_a : List[Any] ="""""".join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
_a : Union[str, Any] =outputs.lower()
return outputs
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : str =self.preprocess_text(SCREAMING_SNAKE_CASE )
_a : List[str] =self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
_a : int =[]
for piece in pieces:
if len(SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_a : Dict =self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : Optional[int] =cur_pieces[1:]
else:
_a : Dict =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(SCREAMING_SNAKE_CASE )
else:
new_pieces.append(SCREAMING_SNAKE_CASE )
return new_pieces
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : Union[str, Any] ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : Dict =[self.sep_token_id]
_a : Dict =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : Optional[Any] =[self.sep_token_id]
_a : List[str] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : List[str] =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def __UpperCAmelCase ( self :Optional[int] , *SCREAMING_SNAKE_CASE :List[str] , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_a : Dict =super()._decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
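# Assumed usage sketch (not part of the original file; needs the `jieba` and
# `sentencepiece` packages plus network access):
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("今天天气真好!")["input_ids"]
#   text = tokenizer.decode(ids)
#
# `decode` reverses the " " -> "\u2582" and "\n" -> "\u2583" mapping that
# `__init__` sets up via `str.maketrans` above.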
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
| 694 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: Union[str, Any] = logging.get_logger(__name__)
A__: List[Any] = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Optional[Any] = "xmod"
def __init__( self :Dict , SCREAMING_SNAKE_CASE :List[str]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :Tuple=1_2 , SCREAMING_SNAKE_CASE :Optional[int]=1_2 , SCREAMING_SNAKE_CASE :Optional[int]=3_0_7_2 , SCREAMING_SNAKE_CASE :List[str]="gelu" , SCREAMING_SNAKE_CASE :Dict=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE :Any=2 , SCREAMING_SNAKE_CASE :Optional[Any]=0.02 , SCREAMING_SNAKE_CASE :Dict=1e-12 , SCREAMING_SNAKE_CASE :Tuple=1 , SCREAMING_SNAKE_CASE :List[str]=0 , SCREAMING_SNAKE_CASE :Any=2 , SCREAMING_SNAKE_CASE :Optional[Any]="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Union[str, Any]=False , SCREAMING_SNAKE_CASE :Optional[int]=2 , SCREAMING_SNAKE_CASE :Dict=False , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :Union[str, Any]=("en_XX",) , SCREAMING_SNAKE_CASE :Optional[int]=None , **SCREAMING_SNAKE_CASE :Any , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : List[str] =vocab_size
_a : Tuple =hidden_size
_a : str =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Dict =hidden_act
_a : Union[str, Any] =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : str =attention_probs_dropout_prob
_a : Tuple =max_position_embeddings
_a : Optional[Any] =type_vocab_size
_a : Any =initializer_range
_a : List[str] =layer_norm_eps
_a : Dict =position_embedding_type
_a : List[str] =use_cache
_a : Any =classifier_dropout
_a : Optional[Any] =pre_norm
_a : Dict =adapter_reduction_factor
_a : List[str] =adapter_layer_norm
_a : Union[str, Any] =adapter_reuse_layer_norm
_a : List[Any] =ln_before_adapter
_a : Tuple =list(SCREAMING_SNAKE_CASE )
_a : List[str] =default_language
class A__ ( UpperCAmelCase__ ):
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_a : List[str] ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_a : Union[str, Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 694 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
def load_weights( checkpoint , hf_model , config ) -> None:
    # Weight-norm parameters are copied as (g, v) pairs, so enable weight norm first
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data =checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data =checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data =checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data =checkpoint[F"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data =checkpoint[F"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data =checkpoint[F"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data =checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data =checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data =checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> None:
    if config_path is not None:
        config =SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config =SpeechTaHifiGanConfig()
    model =SpeechTaHifiGan(config )
    orig_checkpoint =torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats =np.load(stats_path )
    mean =stats[0].reshape(-1 )
    scale =stats[1].reshape(-1 )
    model.mean =torch.from_numpy(mean ).float()
    model.scale =torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
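# Assumed invocation sketch (not part of the original script; the file and
# path names below are placeholders):
#
#   python convert_hifigan.py \
#       --checkpoint_path generator_checkpoint.pt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan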
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 694 | 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ) -> list:
    if rng is None:
        rng =global_rng
    values =[]
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
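# Assumed usage note (not part of the original file): floats_list((2, 3))
# returns a 2x3 nested list of uniform floats drawn from [0, scale), seeded by
# the module-level `global_rng`.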
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1_0 , hop_length=1_6_0 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_0_0_0 , return_attention_mask=False , do_normalize=True , ) -> List[Any]:
        '''simple docstring'''
        self.parent =parent
        self.batch_size =batch_size
        self.min_seq_length =min_seq_length
        self.max_seq_length =max_seq_length
        self.seq_length_diff =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value =padding_value
        self.sampling_rate =sampling_rate
        self.return_attention_mask =return_attention_mask
        self.do_normalize =do_normalize
        self.feature_size =feature_size
        self.chunk_length =chunk_length
        self.hop_length =hop_length
    def prepare_feat_extract_dict( self ) -> Any:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Union[str, Any]:
'''simple docstring'''
def _flatten(SCREAMING_SNAKE_CASE :str ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE ) )
if equal_length:
_a : Union[str, Any] =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[str] =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : List[str] =[np.asarray(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ) -> List[Any]:
        '''simple docstring'''
        self.feat_extract_tester =WhisperFeatureExtractionTester(self )
def __UpperCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
_a : int =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int =feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(SCREAMING_SNAKE_CASE )
_a : Optional[int] =self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Optional[int] =feat_extract_first.to_dict()
_a : Any =feat_extract_second.to_dict()
_a : Any =feat_extract_first.mel_filters
_a : str =feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
_a : Any =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Union[str, Any] =os.path.join(SCREAMING_SNAKE_CASE , """feat_extract.json""" )
feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : List[Any] =self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE )
_a : str =feat_extract_first.to_dict()
_a : str =feat_extract_second.to_dict()
_a : str =feat_extract_first.mel_filters
_a : Any =feat_extract_second.mel_filters
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_a : Tuple =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_a : Tuple =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : Optional[int] =[np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
_a : List[str] =feature_extractor(SCREAMING_SNAKE_CASE , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_a : List[Any] =feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_a : Optional[Any] =feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
_a : Any =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : Tuple =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_a : Optional[int] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : Dict =np.asarray(SCREAMING_SNAKE_CASE )
_a : Any =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : List[Any] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test truncation required
_a : Union[str, Any] =[floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
_a : str =[np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
_a : int =[x[: feature_extractor.n_samples] for x in speech_inputs]
_a : Optional[Any] =[np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs_truncated]
_a : int =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : Optional[Any] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __UpperCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
import torch
_a : int =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _a : List[Any] =np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
_a : Dict =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_a : Dict =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
_a : Dict =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> str:
'''simple docstring'''
_a : Tuple =load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_a : List[Any] =ds.sort("""id""" ).select(range(SCREAMING_SNAKE_CASE ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
# fmt: off
_a : List[Any] =torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
_a : Optional[int] =self._load_datasamples(1 )
_a : Optional[int] =WhisperFeatureExtractor()
_a : List[Any] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
_a : Dict =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : Optional[Any] =self._load_datasamples(1 )[0]
_a : Dict =((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
_a : Dict =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=SCREAMING_SNAKE_CASE )[0]
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE ) - 1 ) < 1e-3 ) )
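# A minimal standalone sketch of the extractor exercised by the tests above: 80 mel
# bins, a 16 kHz sampling rate, and a 30-second (3000-frame) padded window are the
# WhisperFeatureExtractor defaults; the waveform below is synthetic noise, not real audio.
if __name__ == "__main__":
    _sketch_extractor = WhisperFeatureExtractor()
    _sketch_waveform = np.random.randn(1_6_0_0_0 ).astype(np.float32 )  # one second of fake audio
    _sketch_features = _sketch_extractor(_sketch_waveform , sampling_rate=1_6_0_0_0 , return_tensors="""np""" ).input_features
    assert _sketch_features.shape == (1, 8_0, 3_0_0_0)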
| 694 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
        if isinstance(sources , int ):
            _a : Tuple =[sources]
        if isinstance(sinks , int ):
            _a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
        # saturate every edge leaving the source to create the initial preflow
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
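        # discharge loop: push excess flow to admissible (strictly lower) neighbours and,
        # when no admissible edge remains, relabel the vertex so new pushes become possible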
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
                # if the edge has residual capacity and the current vertex sits higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
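        # send min(excess, residual capacity) along the edge, updating preflow and excess on both endpoints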
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
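        # lift the vertex to one unit above its lowest neighbour that still has residual capacity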
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
A__: str = [0]
A__: Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__: List[str] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 694 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000000 ,_UpperCAmelCase : int = 10 ) -> int:
    _a : defaultdict =defaultdict(int )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_a : Union[str, Any] =max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_a : List[str] =1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
    for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
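# Worked example: an outer square of width 3 with a 1x1 hole uses 3 * 3 - 1 * 1 = 8 tiles,
# so t = 8 gains one lamina; the sum above counts the tile totals t that can be formed
# in between 1 and 10 distinct ways.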
if __name__ == "__main__":
print(F"{solution() = }")
| 694 |
'''simple docstring'''
A__: Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :int = 1_2_8 , SCREAMING_SNAKE_CASE :int = 2_5_6 , SCREAMING_SNAKE_CASE :float = 2_000.0 , SCREAMING_SNAKE_CASE :int = 7_6_8 , SCREAMING_SNAKE_CASE :int = 1_2 , SCREAMING_SNAKE_CASE :int = 1_2 , SCREAMING_SNAKE_CASE :int = 6_4 , SCREAMING_SNAKE_CASE :int = 2_0_4_8 , SCREAMING_SNAKE_CASE :float = 0.1 , ) -> Tuple:
'''simple docstring'''
super().__init__()
_a : str =nn.Sequential(
nn.Linear(SCREAMING_SNAKE_CASE , d_model * 4 , bias=SCREAMING_SNAKE_CASE ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=SCREAMING_SNAKE_CASE ) , nn.SiLU() , )
_a : List[str] =nn.Embedding(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : List[Any] =False
_a : Any =nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
_a : List[Any] =nn.Dropout(p=SCREAMING_SNAKE_CASE )
_a : List[str] =nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE ):
# FiLM conditional T5 decoder
_a : List[str] =DecoderLayer(d_model=SCREAMING_SNAKE_CASE , d_kv=SCREAMING_SNAKE_CASE , num_heads=SCREAMING_SNAKE_CASE , d_ff=SCREAMING_SNAKE_CASE , dropout_rate=SCREAMING_SNAKE_CASE )
self.decoders.append(SCREAMING_SNAKE_CASE )
_a : Tuple =TaLayerNorm(SCREAMING_SNAKE_CASE )
_a : int =nn.Dropout(p=SCREAMING_SNAKE_CASE )
_a : List[str] =nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str ) -> Optional[Any]:
'''simple docstring'''
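        # outer product of the query and key padding masks yields the full pairwise
        # attention mask; the final unsqueeze adds the broadcastable head axis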
_a : Optional[int] =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[str]:
'''simple docstring'''
_a , _a , _a : Union[str, Any] =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_a : Dict =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_a : Optional[Any] =self.conditioning_emb(SCREAMING_SNAKE_CASE ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_a : Union[str, Any] =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_a : Tuple =torch.broadcast_to(
torch.arange(SCREAMING_SNAKE_CASE , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_a : List[str] =self.position_encoding(SCREAMING_SNAKE_CASE )
_a : Tuple =self.continuous_inputs_projection(SCREAMING_SNAKE_CASE )
inputs += position_encodings
_a : Tuple =self.dropout(SCREAMING_SNAKE_CASE )
# decoder: No padding present.
_a : str =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_a : str =[(x, self.encoder_decoder_mask(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_a : Optional[int] =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_a : int =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_a : Optional[int] =lyr(
SCREAMING_SNAKE_CASE , conditioning_emb=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )[0]
_a : Union[str, Any] =self.decoder_norm(SCREAMING_SNAKE_CASE )
_a : Dict =self.post_dropout(SCREAMING_SNAKE_CASE )
_a : Dict =self.spec_out(SCREAMING_SNAKE_CASE )
return spec_out
class A__ ( nn.Module ):
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[str]=1e-6 ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_a : str =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=SCREAMING_SNAKE_CASE , d_kv=SCREAMING_SNAKE_CASE , num_heads=SCREAMING_SNAKE_CASE , dropout_rate=SCREAMING_SNAKE_CASE ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=SCREAMING_SNAKE_CASE , d_kv=SCREAMING_SNAKE_CASE , num_heads=SCREAMING_SNAKE_CASE , dropout_rate=SCREAMING_SNAKE_CASE , layer_norm_epsilon=SCREAMING_SNAKE_CASE , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=SCREAMING_SNAKE_CASE , d_ff=SCREAMING_SNAKE_CASE , dropout_rate=SCREAMING_SNAKE_CASE , layer_norm_epsilon=SCREAMING_SNAKE_CASE ) )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :List[str]=None , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Any=None , ) -> List[Any]:
'''simple docstring'''
_a : Optional[Any] =self.layer[0](
SCREAMING_SNAKE_CASE , conditioning_emb=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , )
if encoder_hidden_states is not None:
_a : Any =torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
_a : Union[str, Any] =self.layer[1](
SCREAMING_SNAKE_CASE , key_value_states=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , )
# Apply Film Conditional Feed Forward layer
_a : Optional[int] =self.layer[-1](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return (hidden_states,)
class A__ ( nn.Module ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict ) -> List[str]:
'''simple docstring'''
super().__init__()
_a : List[str] =TaLayerNorm(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =TaFiLMLayer(in_features=d_model * 4 , out_features=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =Attention(query_dim=SCREAMING_SNAKE_CASE , heads=SCREAMING_SNAKE_CASE , dim_head=SCREAMING_SNAKE_CASE , out_bias=SCREAMING_SNAKE_CASE , scale_qk=SCREAMING_SNAKE_CASE )
_a : List[Any] =nn.Dropout(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :str=None , SCREAMING_SNAKE_CASE :Optional[Any]=None , ) -> Tuple:
'''simple docstring'''
# pre_self_attention_layer_norm
_a : Optional[int] =self.layer_norm(SCREAMING_SNAKE_CASE )
if conditioning_emb is not None:
_a : Any =self.FiLMLayer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Self-attention block
_a : Tuple =self.attention(SCREAMING_SNAKE_CASE )
_a : Any =hidden_states + self.dropout(SCREAMING_SNAKE_CASE )
return hidden_states
class A__ ( nn.Module ):
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().__init__()
_a : Tuple =Attention(query_dim=SCREAMING_SNAKE_CASE , heads=SCREAMING_SNAKE_CASE , dim_head=SCREAMING_SNAKE_CASE , out_bias=SCREAMING_SNAKE_CASE , scale_qk=SCREAMING_SNAKE_CASE )
_a : List[str] =TaLayerNorm(SCREAMING_SNAKE_CASE , eps=SCREAMING_SNAKE_CASE )
_a : Optional[Any] =nn.Dropout(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
_a : Dict =self.layer_norm(SCREAMING_SNAKE_CASE )
_a : Any =self.attention(
SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , attention_mask=attention_mask.squeeze(1 ) , )
_a : Union[str, Any] =hidden_states + self.dropout(SCREAMING_SNAKE_CASE )
return layer_output
class A__ ( nn.Module ):
def __init__( self :Any , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_a : Tuple =TaDenseGatedActDense(d_model=SCREAMING_SNAKE_CASE , d_ff=SCREAMING_SNAKE_CASE , dropout_rate=SCREAMING_SNAKE_CASE )
_a : int =TaFiLMLayer(in_features=d_model * 4 , out_features=SCREAMING_SNAKE_CASE )
_a : List[Any] =TaLayerNorm(SCREAMING_SNAKE_CASE , eps=SCREAMING_SNAKE_CASE )
_a : Any =nn.Dropout(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int]=None ) -> Dict:
'''simple docstring'''
_a : List[str] =self.layer_norm(SCREAMING_SNAKE_CASE )
if conditioning_emb is not None:
_a : Tuple =self.film(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =self.DenseReluDense(SCREAMING_SNAKE_CASE )
_a : Tuple =hidden_states + self.dropout(SCREAMING_SNAKE_CASE )
return hidden_states
class A__ ( nn.Module ):
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_a : Dict =nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
_a : str =nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
_a : List[Any] =nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
_a : List[Any] =nn.Dropout(SCREAMING_SNAKE_CASE )
_a : int =NewGELUActivation()
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[Any] ) -> List[str]:
'''simple docstring'''
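        # gated feed-forward: one projection is passed through the activation and used
        # to gate a second, purely linear projection (the gated-GELU pattern from T5 v1.1)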
        _a : Union[str, Any] =self.act(self.wi_0(SCREAMING_SNAKE_CASE ) )
        _a : List[str] =self.wi_1(SCREAMING_SNAKE_CASE )
_a : Optional[int] =hidden_gelu * hidden_linear
_a : Optional[Any] =self.dropout(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =self.wo(SCREAMING_SNAKE_CASE )
return hidden_states
class A__ ( nn.Module ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[Any]=1e-6 ) -> List[str]:
'''simple docstring'''
super().__init__()
_a : Optional[int] =nn.Parameter(torch.ones(SCREAMING_SNAKE_CASE ) )
_a : Union[str, Any] =eps
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        _a : Optional[int] =hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=SCREAMING_SNAKE_CASE )
_a : Optional[int] =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
_a : List[str] =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class A__ ( nn.Module ):
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
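        # tanh approximation of GELU, the "gelu_new" variant used in the Google BERT repo and GPT-2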
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(SCREAMING_SNAKE_CASE , 3.0 )) ))
class A__ ( nn.Module ):
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] ) -> int:
'''simple docstring'''
super().__init__()
_a : Dict =nn.Linear(SCREAMING_SNAKE_CASE , out_features * 2 , bias=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Tuple ) -> Tuple:
'''simple docstring'''
_a : List[Any] =self.scale_bias(SCREAMING_SNAKE_CASE )
_a , _a : Optional[int] =torch.chunk(SCREAMING_SNAKE_CASE , 2 , -1 )
_a : int =x * (1 + scale) + shift
return x
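# FiLM (feature-wise linear modulation) in one line: a conditioning embedding is projected
# to per-channel (scale, shift) pairs and applied as x * (1 + scale) + shift. A toy check
# with plain tensors, independent of the module above: zero scale and shift are a no-op.
if __name__ == "__main__":
    _x = torch.ones(2 , 4 )
    _scale = torch.zeros(2 , 4 )
    _shift = torch.zeros(2 , 4 )
    assert torch.equal(_x * (1 + _scale) + _shift , _x )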
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 50 ) -> int:
_a : List[Any] =[1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 694 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A__ ( unittest.TestCase ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =parent
_a : List[str] =batch_size
_a : List[str] =seq_length
_a : List[Any] =is_training
_a : Optional[int] =use_attention_mask
_a : List[Any] =use_token_type_ids
_a : List[Any] =use_labels
_a : Optional[Any] =vocab_size
_a : str =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Union[str, Any] =intermediate_size
_a : int =hidden_act
_a : List[str] =hidden_dropout_prob
_a : Optional[int] =attention_probs_dropout_prob
_a : Dict =max_position_embeddings
_a : Any =type_vocab_size
_a : str =type_sequence_label_size
_a : str =initializer_range
_a : List[str] =num_choices
def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict =None
if self.use_attention_mask:
_a : Any =random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[int] =None
if self.use_token_type_ids:
_a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Union[str, Any] =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
_a : Tuple =self.prepare_config_and_inputs()
_a , _a , _a , _a : List[Any] =config_and_inputs
_a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : List[Any] =self.prepare_config_and_inputs()
_a , _a , _a , _a : Optional[int] =config_and_inputs
_a : Tuple =True
_a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCAmelCase ( self :str ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Dict =model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Any ) -> str:
'''simple docstring'''
_a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
        _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
_a : Dict =model(SCREAMING_SNAKE_CASE )[0]
_a : List[Any] =[1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
_a : Any =np.array(
            [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :int ) -> int:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
        _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
_a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
_a : str =np.array(
            [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 694 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A__: Dict = logging.get_logger(__name__)
A__: str = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : int = "mctct"
def __init__( self :Dict , SCREAMING_SNAKE_CASE :str=8_0_6_5 , SCREAMING_SNAKE_CASE :Dict=1_5_3_6 , SCREAMING_SNAKE_CASE :str=3_6 , SCREAMING_SNAKE_CASE :List[Any]=6_1_4_4 , SCREAMING_SNAKE_CASE :Optional[int]=4 , SCREAMING_SNAKE_CASE :Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE :Optional[Any]=9_2_0 , SCREAMING_SNAKE_CASE :Any=1e-5 , SCREAMING_SNAKE_CASE :Tuple=0.3 , SCREAMING_SNAKE_CASE :Any="relu" , SCREAMING_SNAKE_CASE :str=0.02 , SCREAMING_SNAKE_CASE :str=0.3 , SCREAMING_SNAKE_CASE :Optional[Any]=0.3 , SCREAMING_SNAKE_CASE :Tuple=1 , SCREAMING_SNAKE_CASE :Optional[Any]=0 , SCREAMING_SNAKE_CASE :Any=2 , SCREAMING_SNAKE_CASE :List[str]=1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.3 , SCREAMING_SNAKE_CASE :Dict=1 , SCREAMING_SNAKE_CASE :Tuple=(7,) , SCREAMING_SNAKE_CASE :Any=(3,) , SCREAMING_SNAKE_CASE :Optional[int]=8_0 , SCREAMING_SNAKE_CASE :str=1 , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :List[Any]="sum" , SCREAMING_SNAKE_CASE :Tuple=False , **SCREAMING_SNAKE_CASE :List[Any] , ) -> int:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE )
_a : Tuple =vocab_size
_a : List[Any] =hidden_size
_a : Optional[Any] =num_hidden_layers
_a : Any =intermediate_size
_a : int =num_attention_heads
_a : Dict =attention_head_dim
_a : int =max_position_embeddings
_a : int =layer_norm_eps
_a : int =layerdrop
_a : Union[str, Any] =hidden_act
_a : Union[str, Any] =initializer_range
_a : List[str] =hidden_dropout_prob
_a : List[Any] =attention_probs_dropout_prob
_a : List[Any] =pad_token_id
_a : Tuple =bos_token_id
_a : List[Any] =eos_token_id
_a : Optional[int] =conv_glu_dim
_a : Dict =conv_dropout
_a : Union[str, Any] =num_conv_layers
_a : Dict =input_feat_per_channel
_a : Union[str, Any] =input_channels
_a : Optional[Any] =conv_channels
_a : str =ctc_loss_reduction
_a : int =ctc_zero_infinity
        # prevent the config tests from failing when the config is exported to JSON
_a : Tuple =list(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =list(SCREAMING_SNAKE_CASE )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
| 694 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A__: Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]:
return field(default_factory=lambda: default ,metadata=_UpperCAmelCase )
@dataclass
class A__ :
__UpperCamelCase : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__UpperCamelCase : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__UpperCamelCase : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
__UpperCamelCase : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__UpperCamelCase : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
__UpperCamelCase : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
__UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 694 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
A__: List[Any] = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
A__: List[str] = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
A__: Any = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
A__: str = requests.get(image_url).content
A__: Tuple = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class A__ ( UpperCAmelCase__ ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]:
'''simple docstring'''
_a : int =1.0 if scale is None else scale
_a : Optional[Any] =0.0 if loc is None else loc
super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] )
@property
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class A__ ( nn.Module ):
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Tuple =args_dim
_a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] )
_a : Dict =domain_map
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]:
'''simple docstring'''
_a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj]
return self.domain_map(*SCREAMING_SNAKE_CASE )
class A__ ( nn.Module ):
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int:
'''simple docstring'''
super().__init__()
_a : List[Any] =function
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]:
'''simple docstring'''
return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
class A__ :
__UpperCamelCase : type
__UpperCamelCase : int
__UpperCamelCase : Dict[str, int]
def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None:
'''simple docstring'''
_a : Any =dim
_a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim}
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution:
'''simple docstring'''
_a : str =self._base_distribution(SCREAMING_SNAKE_CASE )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim )
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def __UpperCAmelCase ( self :Any ) -> float:
'''simple docstring'''
return 0.0
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
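        # "squareplus" maps any real x smoothly onto the positive reals: (x + sqrt(x^2 + 4)) / 2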
return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
__UpperCamelCase : type = StudentT
@classmethod
def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]:
'''simple docstring'''
_a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
_a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1}
__UpperCamelCase : type = Normal
@classmethod
def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict:
'''simple docstring'''
_a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1}
__UpperCamelCase : type = NegativeBinomial
@classmethod
def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]:
'''simple docstring'''
_a : int =cls.squareplus(SCREAMING_SNAKE_CASE )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution:
'''simple docstring'''
_a , _a : Any =distr_args
if self.dim == 1:
return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution:
'''simple docstring'''
_a , _a : Optional[int] =distr_args
if scale is not None:
# See scaling property of Gamma.
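            # adding log(scale) to the logits multiplies the NegativeBinomial mean by `scale`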
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 694 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : torch.FloatTensor
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self :str , SCREAMING_SNAKE_CASE :int = 6_5_5_3_6 , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 0 , SCREAMING_SNAKE_CASE :str = "fourier" , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :float = 0.0 , SCREAMING_SNAKE_CASE :Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE :Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE :Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :Tuple[int] = (3_2, 3_2, 6_4) , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :int = 8 , SCREAMING_SNAKE_CASE :int = 1 , SCREAMING_SNAKE_CASE :bool = False , ) -> str:
'''simple docstring'''
super().__init__()
_a : Union[str, Any] =sample_size
# time
if time_embedding_type == "fourier":
_a : Any =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE , log=SCREAMING_SNAKE_CASE , flip_sin_to_cos=SCREAMING_SNAKE_CASE )
_a : Dict =2 * block_out_channels[0]
elif time_embedding_type == "positional":
_a : str =Timesteps(
block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE , downscale_freq_shift=SCREAMING_SNAKE_CASE )
_a : int =block_out_channels[0]
if use_timestep_embedding:
_a : Optional[Any] =block_out_channels[0] * 4
_a : Union[str, Any] =TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE , time_embed_dim=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , out_dim=block_out_channels[0] , )
_a : Dict =nn.ModuleList([] )
_a : Dict =None
_a : int =nn.ModuleList([] )
_a : Any =None
# down
_a : str =in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
_a : Tuple =output_channel
_a : Dict =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_a : int =i == len(SCREAMING_SNAKE_CASE ) - 1
_a : Optional[Any] =get_down_block(
SCREAMING_SNAKE_CASE , num_layers=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
_a : List[Any] =get_mid_block(
SCREAMING_SNAKE_CASE , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE , add_downsample=SCREAMING_SNAKE_CASE , )
# up
_a : str =list(reversed(SCREAMING_SNAKE_CASE ) )
_a : List[Any] =reversed_block_out_channels[0]
if out_block_type is None:
_a : List[str] =out_channels
else:
_a : List[str] =block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
_a : Dict =output_channel
_a : Optional[int] =(
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE ) - 1 else final_upsample_channels
)
_a : Dict =i == len(SCREAMING_SNAKE_CASE ) - 1
_a : Union[str, Any] =get_up_block(
SCREAMING_SNAKE_CASE , num_layers=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
_a : Optional[int] =output_channel
# out
_a : str =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
_a : Union[str, Any] =get_out_block(
out_block_type=SCREAMING_SNAKE_CASE , num_groups_out=SCREAMING_SNAKE_CASE , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , fc_dim=block_out_channels[-1] // 4 , )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :torch.FloatTensor , SCREAMING_SNAKE_CASE :Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE :bool = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
_a : Any =timestep
if not torch.is_tensor(SCREAMING_SNAKE_CASE ):
_a : str =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0:
_a : Any =timesteps[None].to(sample.device )
_a : Any =self.time_proj(SCREAMING_SNAKE_CASE )
if self.config.use_timestep_embedding:
_a : Dict =self.time_mlp(SCREAMING_SNAKE_CASE )
else:
_a : Any =timestep_embed[..., None]
_a : Dict =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_a : List[Any] =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_a : Tuple =()
for downsample_block in self.down_blocks:
_a , _a : Optional[Any] =downsample_block(hidden_states=SCREAMING_SNAKE_CASE , temb=SCREAMING_SNAKE_CASE )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_a : Optional[Any] =self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_a : List[str] =down_block_res_samples[-1:]
_a : Optional[int] =down_block_res_samples[:-1]
_a : Optional[int] =upsample_block(SCREAMING_SNAKE_CASE , res_hidden_states_tuple=SCREAMING_SNAKE_CASE , temb=SCREAMING_SNAKE_CASE )
# 5. post-process
if self.out_block:
_a : List[Any] =self.out_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=SCREAMING_SNAKE_CASE )
| 694 |
'''simple docstring'''
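# set the bit at the given position to 1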
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number | (1 << position)
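# clear the bit at the given position (force it to 0)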
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number & ~(1 << position)
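# flip (toggle) the bit at the given position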
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number ^ (1 << position)
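# report whether the bit at the given position is set, as a bool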
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool:
return ((number >> position) & 1) == 1
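# read the bit at the given position as 0 or 1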
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return int((number & (1 << position)) != 0 )
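# Worked example: for 0b1011 (decimal 11) bits 0, 1 and 3 are set while bit 2 is clear,
# so setting bit 2 yields 0b1111 (15) and clearing bit 1 yields 0b1001 (9).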
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
A__: Optional[Any] = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
_a : Dict =list(s_dict.keys() )
for key in keys:
_a : Any =R""".*/layers_(\d+)"""
_a : str =key
if re.match(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Any =re.sub(R"""layers_(\d+)""" ,R"""block/\1/layer""" ,_UpperCAmelCase )
_a : Optional[int] =R"""(encoder|decoder)\/"""
if re.match(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict =re.match(_UpperCAmelCase ,_UpperCAmelCase ).groups()
if groups[0] == "encoder":
_a : Union[str, Any] =re.sub(R"""/mlp/""" ,R"""/1/mlp/""" ,_UpperCAmelCase )
_a : int =re.sub(R"""/pre_mlp_layer_norm/""" ,R"""/1/layer_norm/""" ,_UpperCAmelCase )
elif groups[0] == "decoder":
_a : str =re.sub(R"""/mlp/""" ,R"""/2/mlp/""" ,_UpperCAmelCase )
_a : str =re.sub(R"""/pre_mlp_layer_norm/""" ,R"""/2/layer_norm/""" ,_UpperCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_a : Union[str, Any] =new_key.replace(_UpperCAmelCase ,_UpperCAmelCase )
print(F"{key} -> {new_key}" )
_a : List[Any] =s_dict.pop(_UpperCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : str =s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : Dict =s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_a : Union[str, Any] =s_dict[key].shape[0]
_a : Dict =s_dict[key]
for idx in range(_UpperCAmelCase ):
_a : Optional[Any] =expert_weihts[idx]
print(F"{key} -> {key.replace('expert/' ,'nested fstring' )}" )
s_dict.pop(_UpperCAmelCase )
return s_dict
A__: List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ) -> List[str]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(_UpperCAmelCase ,"""r""" ) as f:
_a : Dict =f.read()
_a : int =re.findall(R"""(.*) = ([0-9.]*)""" ,_UpperCAmelCase )
_a : Any ={}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_a : List[Any] =float(_UpperCAmelCase ) if """.""" in value else int(_UpperCAmelCase )
_a : str =re.findall(R"""(.*activations) = \(\'(.*)\',\)""" ,_UpperCAmelCase )[0]
_a : Any =str(activation[1] )
_a : List[str] =num_experts
_a : int =SwitchTransformersConfig(**_UpperCAmelCase )
return config
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : Union[str, Any]="./" ,_UpperCAmelCase : List[Any]=8 ) -> int:
# Initialise PyTorch model
print(F"Loading flax weights from : {flax_checkpoint_path}" )
_a : Dict =checkpoints.load_tax_checkpoint(_UpperCAmelCase )
if gin_file is not None:
_a : Dict =convert_gin_to_config(_UpperCAmelCase ,_UpperCAmelCase )
else:
_a : str =SwitchTransformersConfig.from_pretrained(_UpperCAmelCase )
_a : List[Any] =SwitchTransformersForConditionalGeneration(_UpperCAmelCase )
_a : int =flax_params["""target"""]
_a : Optional[int] =flatten_dict(_UpperCAmelCase ,sep="""/""" )
_a : str =rename_keys(_UpperCAmelCase )
_a : str =unflatten_dict(_UpperCAmelCase ,sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_UpperCAmelCase ,_UpperCAmelCase )
print(F"Save PyTorch model to {pytorch_dump_path}" )
pt_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
A__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
        help=(
            '''Path to the T5X checkpoint of the pre-trained SwitchTransformers model. \nThe model configuration'''
            ''' is read from `--gin_file` if provided, otherwise from `--config_name`.'''
        ),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
        help='''Path to the gin config file. If not provided, a `--config_name` has to be passed.''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
A__: int = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
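    # Hypothetical CLI invocation sketch (the script name and all paths
    # below are placeholders, not verified values):
    #
    #   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
    #       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --gin_file /path/to/operative_config.gin \
    #       --pytorch_dump_folder_path ./switch-base-8 \
    #       --num_experts 8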
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float:
_validate_point(_UpperCAmelCase )
_validate_point(_UpperCAmelCase )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[float] ) -> None:
if point:
        if isinstance(_UpperCAmelCase ,list ):
            for item in point:
                if not isinstance(item ,(int, float) ):
_a : str =(
"""Expected a list of numbers as input, found """
F"{type(_UpperCAmelCase ).__name__}"
)
raise TypeError(_UpperCAmelCase )
else:
_a : List[Any] =F"Expected a list of numbers as input, found {type(_UpperCAmelCase ).__name__}"
raise TypeError(_UpperCAmelCase )
else:
raise ValueError("""Missing an input""" )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ) -> float:
_validate_point(_UpperCAmelCase )
_validate_point(_UpperCAmelCase )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(_UpperCAmelCase ,_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
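    # Worked example (a sketch; assumes the two helpers keep their original
    # names manhattan_distance and manhattan_distance_one_liner). Both
    # compute the L1 distance, so each call below is expected to print 9.0.
    print(manhattan_distance([1, 1], [9, 2]))
    print(manhattan_distance_one_liner([1, 1], [9, 2]))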
| 694 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 |
'''simple docstring'''
from __future__ import annotations
class A__ :
def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
_a : int =order
# a_{0} ... a_{k}
_a : Optional[Any] =[1.0] + [0.0] * order
# b_{0} ... b_{k}
_a : Tuple =[1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_a : List[Any] =[0.0] * self.order
# y[n-1] ... y[n-k]
_a : Tuple =[0.0] * self.order
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < self.order:
_a : int =[1.0, *a_coeffs]
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : int =(
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : Optional[Any] =(
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
_a : List[str] =a_coeffs
_a : Union[str, Any] =b_coeffs
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float:
'''simple docstring'''
_a : str =0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_a : str =self.input_history[:-1]
_a : Optional[Any] =self.output_history[:-1]
_a : Optional[int] =sample
_a : Tuple =result
return result
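if __name__ == "__main__":
    # Minimal usage sketch (assumes the class and its methods keep their
    # original names IIRFilter, set_coefficients and process; the
    # coefficients below are illustrative placeholders, not a designed filter).
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.1, 0.3], [0.05, 0.1, 0.05])
    print([filt.process(x) for x in (0.0, 1.0, 0.5, -0.5)])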
| 694 | 1 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
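    # Quick sanity check (a sketch; assumes the helper keeps its original
    # name factorial): 5! == 120.
    print(factorial(5))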
| 694 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : dict ,_UpperCAmelCase : str ,_UpperCAmelCase : set ,_UpperCAmelCase : set ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ,_UpperCAmelCase : PriorityQueue ,_UpperCAmelCase : dict ,_UpperCAmelCase : float | int ,) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_a : Dict =cst_fwd.get(_UpperCAmelCase ,np.inf )
_a : int =cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_a : Tuple =new_cost_f
_a : Optional[Any] =v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_a : str =cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ) -> int:
_a : Optional[Any] =-1
_a : List[str] =set()
_a : Optional[int] =set()
_a : Optional[int] ={source: 0}
_a : List[str] ={destination: 0}
_a : Union[str, Any] ={source: None}
_a : Dict ={destination: None}
_a : PriorityQueue[Any] =PriorityQueue()
_a : PriorityQueue[Any] =PriorityQueue()
_a : Optional[int] =np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_a , _a : str =queue_forward.get()
visited_forward.add(_UpperCAmelCase )
_a , _a : List[Any] =queue_backward.get()
visited_backward.add(_UpperCAmelCase )
_a : int =pass_and_relaxation(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
_a : Any =pass_and_relaxation(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_a : Any =shortest_distance
return shortest_path_distance
A__: Union[str, Any] = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
A__: str = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
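    # Worked example (a sketch; assumes the original names bidirectional_dij,
    # graph_fwd and graph_bwd survive). The shortest E -> F path in the
    # forward graph above is E -> G -> F with total weight 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))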
| 694 | 1 |
'''simple docstring'''
class A__ :
def __init__( self :int , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Dict ) -> str:
'''simple docstring'''
_a : Optional[Any] =name
_a : str =value
_a : str =weight
def __repr__( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def __UpperCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
return self.value
def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self.name
def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.weight
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : str ) -> Optional[int]:
_a : List[Any] =[]
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ) -> Dict:
_a : Union[str, Any] =sorted(_UpperCAmelCase ,key=_UpperCAmelCase ,reverse=_UpperCAmelCase )
_a : Dict =[]
_a , _a : int =0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE_ ( ) -> str:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
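    # Minimal usage sketch (assumes the original names Things, build_menu,
    # greedy and the get_value accessor survive). Sorting by value and
    # packing under a weight budget of 100 is expected to take Pizza and
    # Burger for a total value of 180.
    menu = build_menu(["Burger", "Pizza", "Salad"], [80, 100, 70], [40, 60, 20])
    print(greedy(menu, 100, Things.get_value))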
| 694 |
'''simple docstring'''
from math import factorial
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 100 ) -> int:
    return sum(map(int ,str(factorial(_UpperCAmelCase ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
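    # Project Euler #20: for n = 100 the digit sum of 100! is 648.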
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ,) -> float:
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ,) -> float:
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
_UpperCAmelCase ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
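    # Worked example (a sketch; assumes the helper keeps its original name
    # simple_interest): a 5% daily rate on a principal of 500 over 6 days
    # accrues 500 * 0.05 * 6 = 150.0.
    print(simple_interest(500.0, 0.05, 6))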
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> list:
_a : Tuple =len(_UpperCAmelCase )
_a : str =[]
for i in range(len(_UpperCAmelCase ) - pat_len + 1 ):
_a : int =True
for j in range(_UpperCAmelCase ):
if s[i + j] != pattern[j]:
_a : int =False
break
if match_found:
            position.append(i )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
_a : Optional[Any] =AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
_a : Optional[int] =AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(SCREAMING_SNAKE_CASE )
from datasets import load_dataset
_a : Optional[Any] =load_dataset("""nielsr/rvlcdip-demo""" )
_a : Optional[Any] =dataset["""train"""][0]["""image"""].convert("""RGB""" )
_a : Union[str, Any] =image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_a : List[str] =model(**SCREAMING_SNAKE_CASE )
_a : Any =outputs.logits
_a : int =torch.Size((1, 1_6) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )
_a : int =torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=SCREAMING_SNAKE_CASE , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 694 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple:
'''simple docstring'''
_a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8}
_a : int =parent
_a : Optional[int] =batch_size
_a : List[str] =num_channels
_a : Optional[Any] =image_size
_a : int =min_resolution
_a : str =max_resolution
_a : str =do_resize
_a : Tuple =size
_a : Tuple =do_normalize
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
_a : Any =ImageGPTImageProcessingTester(self )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
_a : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) )
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
_a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
_a : Dict =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) )
else:
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : List[Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Any =os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict()
_a : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict()
_a : Union[str, Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" )
_a : Dict =Image.open(dataset[4]["""file"""] )
_a : Optional[int] =Image.open(dataset[5]["""file"""] )
_a : Optional[Any] =[imagea, imagea]
return images
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_a : int =prepare_images()
# test non-batched
_a : Dict =image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
_a : Optional[int] =[3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE )
# test batched
_a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
_a : Any =[3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
| 694 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Union[str, Any] = logging.get_logger(__name__)
A__: List[str] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
A__: str = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
A__: int = {
'''camembert-base''': 512,
}
A__: Any = '''▁'''
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : int = VOCAB_FILES_NAMES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :str="</s>" , SCREAMING_SNAKE_CASE :List[str]="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE :List[str]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Tuple=["<s>NOTUSED", "</s>NOTUSED"] , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :Any , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
        _a : Dict =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , str ) else mask_token
_a : str ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
_a : str =vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_a : Union[str, Any] ={"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
_a : Union[str, Any] =len(self.fairseq_tokens_to_ids )
_a : Dict =len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Tuple =[self.cls_token_id]
_a : Optional[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : str =[self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : int ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :str ) -> Optional[Any]:
'''simple docstring'''
_a : List[Any] =[]
_a : Tuple =""""""
_a : str =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token
_a : List[Any] =True
_a : List[str] =[]
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE )
_a : List[Any] =False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : Union[str, Any] =self.__dict__.copy()
_a : int =None
return state
def __setstate__( self :Tuple , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_a : Optional[Any] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Optional[int] ={}
_a : int =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : List[str] =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Any =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[int] ,_UpperCAmelCase : int ) -> bool:
_a : Optional[int] =len(_UpperCAmelCase )
_a : Tuple =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
_a : Any =True
# sum is not zero and set is empty then false
for i in range(1 ,required_sum + 1 ):
_a : int =False
for i in range(1 ,arr_len + 1 ):
for j in range(1 ,required_sum + 1 ):
if arr[i - 1] > j:
_a : Optional[Any] =subset[i - 1][j]
if arr[i - 1] <= j:
_a : Union[str, Any] =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
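    # Worked example (a sketch; assumes the helper keeps its original name
    # is_sum_subset): [2, 4, 6, 8] has a subset summing to 8 but none
    # summing to 5, so the calls below are expected to print True and False.
    print(is_sum_subset([2, 4, 6, 8], 8))
    print(is_sum_subset([2, 4, 6, 8], 5))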
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
A__: Optional[Any] = '''2020.9.26'''
A__: Optional[Any] = '''xcodz-dot, cclaus, dhruvmanila'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ) -> tuple[float, float]:
    if not all(isinstance(val ,(float, int) ) for val in locals().values() ):
_a : Any =F"Input values must either be float or int: {list(locals().values() )}"
raise TypeError(_UpperCAmelCase )
_a : str =((x * distance) / (z + distance)) * scale
_a : str =((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : str ,_UpperCAmelCase : float ) -> tuple[float, float, float]:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
raise TypeError("""Axis must be a str""" )
_a : Any =locals()
del input_variables["axis"]
    if not all(isinstance(val ,(float, int) ) for val in input_variables.values() ):
_a : Union[str, Any] =(
"""Input values except axis must either be float or int: """
F"{list(input_variables.values() )}"
)
raise TypeError(_UpperCAmelCase )
_a : Any =(angle % 360) / 450 * 180 / math.pi
if axis == "z":
_a : Dict =x * math.cos(_UpperCAmelCase ) - y * math.sin(_UpperCAmelCase )
_a : int =y * math.cos(_UpperCAmelCase ) + x * math.sin(_UpperCAmelCase )
_a : Union[str, Any] =z
elif axis == "x":
_a : Optional[Any] =y * math.cos(_UpperCAmelCase ) - z * math.sin(_UpperCAmelCase )
_a : Any =z * math.cos(_UpperCAmelCase ) + y * math.sin(_UpperCAmelCase )
_a : Dict =x
elif axis == "y":
_a : List[Any] =x * math.cos(_UpperCAmelCase ) - z * math.sin(_UpperCAmelCase )
_a : List[Any] =z * math.cos(_UpperCAmelCase ) + x * math.sin(_UpperCAmelCase )
_a : Any =y
else:
raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 4000000 ) -> int:
_a : Optional[Any] =[]
_a , _a : Union[str, Any] =0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_UpperCAmelCase )
_a , _a : Optional[Any] =b, a + b
return sum(_UpperCAmelCase )
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :int=7 , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Tuple=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :Union[str, Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=[1, 1, 2] , SCREAMING_SNAKE_CASE :List[str]=1 , SCREAMING_SNAKE_CASE :int=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=4 , SCREAMING_SNAKE_CASE :Optional[int]=8 , SCREAMING_SNAKE_CASE :Dict=3_7 , SCREAMING_SNAKE_CASE :int="gelu_new" , SCREAMING_SNAKE_CASE :str=0.1 , SCREAMING_SNAKE_CASE :Optional[Any]=0.1 , SCREAMING_SNAKE_CASE :Any=0.0 , SCREAMING_SNAKE_CASE :Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE :Tuple=3 , SCREAMING_SNAKE_CASE :str=0.02 , SCREAMING_SNAKE_CASE :str=3 , SCREAMING_SNAKE_CASE :Dict=4 , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :Tuple=False , ) -> str:
'''simple docstring'''
_a : Any =parent
_a : Optional[Any] =batch_size
_a : List[Any] =seq_length
_a : List[Any] =is_training
_a : Tuple =use_input_mask
_a : Any =use_token_type_ids
_a : Tuple =use_labels
_a : Dict =vocab_size
_a : List[str] =block_sizes
_a : Optional[int] =num_decoder_layers
_a : Union[str, Any] =d_model
_a : int =n_head
_a : Dict =d_head
_a : Optional[int] =d_inner
_a : int =hidden_act
_a : int =hidden_dropout
_a : Optional[Any] =attention_dropout
_a : Tuple =activation_dropout
_a : Union[str, Any] =max_position_embeddings
_a : List[str] =type_vocab_size
_a : Optional[int] =2
_a : Dict =num_labels
_a : Optional[Any] =num_choices
_a : List[Any] =scope
_a : Optional[Any] =initializer_std
# Used in the tests to check the size of the first attention layer
_a : Optional[int] =n_head
# Used in the tests to check the size of the first hidden state
_a : Dict =self.d_model
# Used in the tests to check the number of output hidden states/attentions
_a : Optional[int] =sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_a : Union[str, Any] =self.num_hidden_layers + 2
def __UpperCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
_a : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Optional[int] =None
if self.use_input_mask:
_a : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
_a : List[Any] =None
if self.use_token_type_ids:
_a : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Dict =None
_a : Any =None
_a : Union[str, Any] =None
if self.use_labels:
_a : str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : Union[str, Any] =ids_tensor([self.batch_size] , self.num_choices )
_a : Any =FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[Any] , ) -> Any:
'''simple docstring'''
_a : Optional[int] =TFFunnelModel(config=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Tuple =model(SCREAMING_SNAKE_CASE )
_a : Dict =[input_ids, input_mask]
_a : Dict =model(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_a : str =False
_a : Union[str, Any] =TFFunnelModel(config=SCREAMING_SNAKE_CASE )
_a : Dict =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_a : List[str] =False
_a : str =TFFunnelModel(config=SCREAMING_SNAKE_CASE )
_a : int =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , ) -> str:
'''simple docstring'''
_a : Any =TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
_a : int =[input_ids, input_mask]
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
_a : str =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
_a : Any =False
_a : Optional[int] =TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE )
_a : int =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
_a : List[str] =False
_a : Optional[int] =TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE )
_a : List[str] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , ) -> Dict:
'''simple docstring'''
_a : Union[str, Any] =TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE )
_a : List[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Any] , ) -> Union[str, Any]:
'''simple docstring'''
_a : Any =TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE )
_a : Optional[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : int =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any , ) -> str:
'''simple docstring'''
_a : List[str] =self.num_labels
_a : Any =TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE )
_a : Any ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :List[str] , ) -> List[str]:
'''simple docstring'''
_a : List[str] =self.num_choices
_a : Optional[int] =TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE )
_a : str =tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_a : Optional[int] =tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_a : List[str] =tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
_a : str ={
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_a : List[Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str , ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[int] =self.num_labels
_a : str =TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE )
_a : Any ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : List[Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] , ) -> Optional[Any]:
'''simple docstring'''
_a : Tuple =TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
_a : Optional[Any] ={"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self :Any ) -> List[Any]:
'''simple docstring'''
_a : int =self.prepare_config_and_inputs()
        _a , _a , _a , _a , _a , _a , _a : Any =config_and_inputs
_a : Union[str, Any] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : List[Any] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCamelCase : Optional[Any] = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase : str = False
__UpperCamelCase : Tuple = False
def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
_a : str =TFFunnelModelTester(self )
_a : List[str] =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
_a : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
_a : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
_a : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
_a : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str ) -> int:
'''simple docstring'''
_a : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__UpperCamelCase : List[str] = False
__UpperCamelCase : Optional[int] = False
def __UpperCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
_a : int =TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE )
_a : Any =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
_a : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
_a : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
_a : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE )
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
    if n == 1 or not isinstance(_UpperCAmelCase ,int ):
return 0
elif n == 2:
return 1
else:
_a : Dict =[0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
_a : Union[str, Any] =0
_a : Optional[Any] =2
while digits < n:
index += 1
_a : Optional[int] =len(str(fibonacci(_UpperCAmelCase ) ) )
return index
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int:
return fibonacci_digits_index(_UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
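    # Worked example (a sketch; assumes the helpers keep their original
    # names): the first Fibonacci number with 3 digits is F(12) = 144, so
    # fibonacci_digits_index(3) is expected to return 12. For the classic
    # Project Euler #25 input of 1000 digits the answer is 4782.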
| 694 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
A__: Tuple = None
try:
import msvcrt
except ImportError:
A__: List[Any] = None
try:
import fcntl
except ImportError:
A__: Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
A__: Optional[Any] = OSError
# Data
# ------------------------------------------------
A__: int = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
A__: Union[str, Any] = '''3.0.12'''
A__: Union[str, Any] = None
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
global _logger
_a : Tuple =_logger or logging.getLogger(__name__ )
return _logger
class A__ ( UpperCAmelCase__ ):
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_a : List[str] =lock_file
return None
def __str__( self :Optional[int] ) -> List[str]:
'''simple docstring'''
_a : List[str] =f"The file lock '{self.lock_file}' could not be acquired."
return temp
class A__ :
def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : Union[str, Any] =lock
return None
def __enter__( self :List[Any] ) -> List[Any]:
'''simple docstring'''
return self.lock
def __exit__( self :str , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :str ) -> Optional[Any]:
'''simple docstring'''
self.lock.release()
return None
class A__ :
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str=-1 , SCREAMING_SNAKE_CASE :int=None ) -> Tuple:
'''simple docstring'''
_a : str =max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
_a : Union[str, Any] =self.hash_filename_if_too_long(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# The path to the lock file.
_a : Union[str, Any] =lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a : Dict =None
# The default timeout value.
_a : str =timeout
# We use this lock primarily for the lock counter.
_a : Optional[int] =threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a : Tuple =0
return None
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self._lock_file
@property
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
return self._timeout
@timeout.setter
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Any ) -> Union[str, Any]:
'''simple docstring'''
_a : Tuple =float(SCREAMING_SNAKE_CASE )
return None
def __UpperCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError()
def __UpperCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
raise NotImplementedError()
@property
def __UpperCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
return self._lock_file_fd is not None
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[Any]=None , SCREAMING_SNAKE_CASE :Optional[Any]=0.05 ) -> Any:
'''simple docstring'''
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a : int =self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a : List[Any] =id(self )
_a : int =self._lock_file
_a : Tuple =time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(SCREAMING_SNAKE_CASE )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a : List[str] =max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int]=False ) -> Any:
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a : Optional[Any] =id(self )
_a : List[str] =self._lock_file
logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
_a : Any =0
logger().debug(f"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
self.acquire()
return self
def __exit__( self :Dict , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :str ) -> Any:
'''simple docstring'''
self.release()
return None
def __del__( self :Optional[int] ) -> Any:
'''simple docstring'''
self.release(force=SCREAMING_SNAKE_CASE )
return None
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int ) -> str:
'''simple docstring'''
_a : Any =os.path.basename(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > max_length and max_length > 0:
_a : Tuple =os.path.dirname(SCREAMING_SNAKE_CASE )
_a : Tuple =str(hash(SCREAMING_SNAKE_CASE ) )
_a : Optional[int] =filename[: max_length - len(SCREAMING_SNAKE_CASE ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return path
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=-1 , SCREAMING_SNAKE_CASE :Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(SCREAMING_SNAKE_CASE , timeout=SCREAMING_SNAKE_CASE , max_filename_length=SCREAMING_SNAKE_CASE )
_a : List[Any] ="""\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a : Any =os.open(self._lock_file , SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
try:
msvcrt.locking(SCREAMING_SNAKE_CASE , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(SCREAMING_SNAKE_CASE )
else:
_a : int =fd
return None
def __UpperCAmelCase ( self :int ) -> int:
'''simple docstring'''
_a : Dict =self._lock_file_fd
_a : Optional[Any] =None
msvcrt.locking(SCREAMING_SNAKE_CASE , msvcrt.LK_UNLCK , 1 )
os.close(SCREAMING_SNAKE_CASE )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class A__ ( UpperCAmelCase__ ):
def __init__( self :Any , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str=-1 , SCREAMING_SNAKE_CASE :List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
_a : int =os.statvfs(os.path.dirname(SCREAMING_SNAKE_CASE ) ).f_namemax
super().__init__(SCREAMING_SNAKE_CASE , timeout=SCREAMING_SNAKE_CASE , max_filename_length=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_a : Any =os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a : Any =os.open(self._lock_file , SCREAMING_SNAKE_CASE )
try:
fcntl.flock(SCREAMING_SNAKE_CASE , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(SCREAMING_SNAKE_CASE )
else:
_a : Optional[int] =fd
return None
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a : Optional[Any] =self._lock_file_fd
_a : Dict =None
fcntl.flock(SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
os.close(SCREAMING_SNAKE_CASE )
return None
class A__ ( UpperCAmelCase__ ):
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
_a : Tuple =os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a : int =os.open(self._lock_file , SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
_a : Tuple =fd
return None
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
os.close(self._lock_file_fd )
_a : List[str] =None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
A__: Any = None
if msvcrt:
A__: Tuple = WindowsFileLock
elif fcntl:
A__: Union[str, Any] = UnixFileLock
else:
A__: Tuple = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
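# Minimal usage sketch (a hypothetical snippet; "resource.txt.lock" is a
# placeholder path):
#
#     lock = FileLock("resource.txt.lock", timeout=5)
#     with lock:
#         ...  # exclusive access to the guarded resource
#
# Re-acquiring the same lock in nested `with` blocks is safe thanks to the
# internal lock counter; the file is only unlocked when the counter hits 0.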
| 694 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ) -> str:
# Initialise PyTorch model
_a : List[str] =RemBertConfig.from_json_file(_UpperCAmelCase )
print("""Building PyTorch model from configuration: {}""".format(str(_UpperCAmelCase ) ) )
_a : Dict =RemBertModel(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(_UpperCAmelCase ) )
torch.save(model.state_dict() ,_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 694 | 1 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
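    # Quick sanity check of the introsort pipeline above (illustrative values):
    #
    # >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    # [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    # >>> sort([-1, -5, 0, 3])
    # [-5, -1, 0, 3]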
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
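# Minimal usage sketch for the tokenizer above (loads the sentencepiece and dict
# files of the "vinai/bartpho-syllable" checkpoint referenced in the maps above;
# fetching them requires network access):
#
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
# ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]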
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_sew'''] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
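# The _LazyModule above defers importing the torch-heavy modeling module until an
# attribute is first accessed. A minimal standalone sketch of the idea (not the
# transformers implementation):
#
# import importlib
# import types
#
# class LazySketchModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         # map each exported attribute to the submodule that defines it
#         self._attr_to_module = {
#             attr: mod for mod, attrs in import_structure.items() for attr in attrs
#         }
#     def __getattr__(self, attr):
#         submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#         return getattr(submodule, attr)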
| 694 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> Optional[int]: # picklable for multiprocessing
return x.sum()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ) -> str: # picklable for multiprocessing
return i + 1
@dataclass
class A__ :
__UpperCamelCase : int
__UpperCamelCase : str
class A__ ( UpperCAmelCase__ ):
def __UpperCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
_a : List[str] ={}
_a : Dict =[]
_a : Any =1
_a : Any =[1, 2]
_a : Dict ={"""a""": 1, """b""": 2}
_a : str ={"""a""": [1, 2], """b""": [3, 4]}
_a : Optional[int] ={"""a""": {"""1""": 1}, """b""": 2}
_a : Optional[int] ={"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
_a : Any ={}
_a : Tuple =[]
_a : Any =2
_a : Optional[Any] =[2, 3]
_a : Union[str, Any] ={"""a""": 2, """b""": 3}
_a : Dict ={"""a""": [2, 3], """b""": [4, 5]}
_a : List[str] ={"""a""": {"""1""": 2}, """b""": 3}
_a : Optional[int] ={"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
_a : Any =2
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
_a : List[str] ={"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
_a : Optional[Any] ={"""a""": 2, """b""": 0, """c""": 2}
_a : Tuple ={
"""a""": np.eye(2 ).astype(SCREAMING_SNAKE_CASE ),
"""b""": np.zeros(3 ).astype(SCREAMING_SNAKE_CASE ),
"""c""": np.ones(2 ).astype(SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , map_numpy=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , map_numpy=SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , map_numpy=SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , map_numpy=SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
map_nested(lambda SCREAMING_SNAKE_CASE : x + 1 , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : List[Any] ={"""a""": 1, """b""": 2}
_a : Dict ={"""a""": 3, """b""": 4}
_a : Dict ={"""a""": 5, """b""": 6}
_a : str =sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
class A__ :
__UpperCamelCase : Optional[Any] = "bar"
_a : str =Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(SCREAMING_SNAKE_CASE , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
_a : Any ={F"{i}": i for i in range(_UpperCAmelCase )}
_a : Optional[Any] =map_nested(lambda _UpperCAmelCase : x + 10 ,_UpperCAmelCase ,num_proc=_UpperCAmelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A__ ( UpperCAmelCase__ ):
@require_tf
def __UpperCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
_a : List[Any] =layers.Dense(2 )
def gen_random_output():
_a : Union[str, Any] =tf.random.uniform((1, 3) )
return model(SCREAMING_SNAKE_CASE ).numpy()
with temp_seed(4_2 , set_tensorflow=SCREAMING_SNAKE_CASE ):
_a : Optional[int] =gen_random_output()
with temp_seed(4_2 , set_tensorflow=SCREAMING_SNAKE_CASE ):
_a : Tuple =gen_random_output()
_a : List[str] =gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __UpperCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
import torch
def gen_random_output():
_a : Dict =torch.nn.Linear(3 , 2 )
_a : Dict =torch.rand(1 , 3 )
return model(SCREAMING_SNAKE_CASE ).detach().numpy()
with temp_seed(4_2 , set_pytorch=SCREAMING_SNAKE_CASE ):
_a : str =gen_random_output()
with temp_seed(4_2 , set_pytorch=SCREAMING_SNAKE_CASE ):
_a : Any =gen_random_output()
_a : Optional[Any] =gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a : Tuple =gen_random_output()
with temp_seed(4_2 ):
_a : Optional[Any] =gen_random_output()
_a : Dict =gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" ,[{}] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ) -> Tuple:
_a : Optional[int] =NestedDataStructure(_UpperCAmelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" ,[
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] ,)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
_a : List[str] =NestedDataStructure(_UpperCAmelCase ).flatten()
assert output == expected_output
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_a : List[str] =A(x=1 ,y="""foobar""" )
_a : int ={"""x""": 1, """y""": """foobar"""}
assert asdict(_UpperCAmelCase ) == expected_output
_a : Union[str, Any] ={"""a""": {"""b""": A(x=10 ,y="""foo""" )}, """c""": [A(x=20 ,y="""bar""" )]}
_a : Any ={"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(_UpperCAmelCase ) == expected_output
with pytest.raises(_UpperCAmelCase ):
asdict([1, A(x=10 ,y="""foo""" )] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> Tuple:
return text.split()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> List[Any]:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
with Pool(2 ) as pool:
_a : Any =list(iflatmap_unordered(_UpperCAmelCase ,_split_text ,kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(_UpperCAmelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a : int =list(iflatmap_unordered(_UpperCAmelCase ,_split_text ,kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(_UpperCAmelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a : Optional[int] =[]
for yield_time, content in iflatmap_unordered(
_UpperCAmelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(_UpperCAmelCase )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(_UpperCAmelCase ) == 4
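# The map_nested behavior exercised in the tests above can be summarized by this
# rough standalone sketch (illustrative only; the real implementation also
# handles numpy arrays, tuples, multiprocessing and parallel_min_length):
#
# def map_nested_sketch(fn, data):
#     if isinstance(data, dict):
#         return {k: map_nested_sketch(fn, v) for k, v in data.items()}
#     if isinstance(data, (list, tuple)):
#         return type(data)(map_nested_sketch(fn, v) for v in data)
#     return fn(data)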
| 694 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term -> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("""solve_simultaneous() requires lists of integers""")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("""solve_simultaneous() requires at least 1 full equation""")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
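    # Independent cross-check sketch: each input row is [a1, ..., an, b] for the
    # equation a1*x1 + ... + an*xn = b, so the same system can be solved with NumPy
    # (assumes numpy is installed; it is not a dependency of this module):
    #
    # import numpy as np
    # a = np.array([row[:-1] for row in eq], dtype=float)
    # b = np.array([row[-1] for row in eq], dtype=float)
    # print(np.linalg.solve(a, b))  # should match solve_simultaneous(eq)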
| 694 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A__: str = logging.get_logger(__name__)
A__: Optional[int] = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict = "marian"
__UpperCamelCase : Dict = ["past_key_values"]
__UpperCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self :Any , SCREAMING_SNAKE_CASE :Union[str, Any]=5_8_1_0_1 , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Union[str, Any]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=4_0_9_6 , SCREAMING_SNAKE_CASE :Tuple=1_6 , SCREAMING_SNAKE_CASE :str=1_2 , SCREAMING_SNAKE_CASE :Any=4_0_9_6 , SCREAMING_SNAKE_CASE :Dict=1_6 , SCREAMING_SNAKE_CASE :List[str]=0.0 , SCREAMING_SNAKE_CASE :int=0.0 , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :Union[str, Any]=True , SCREAMING_SNAKE_CASE :List[Any]="gelu" , SCREAMING_SNAKE_CASE :int=1_0_2_4 , SCREAMING_SNAKE_CASE :Any=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.0 , SCREAMING_SNAKE_CASE :List[str]=0.0 , SCREAMING_SNAKE_CASE :str=0.02 , SCREAMING_SNAKE_CASE :str=5_8_1_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :str=5_8_1_0_0 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :Tuple=0 , SCREAMING_SNAKE_CASE :List[Any]=True , **SCREAMING_SNAKE_CASE :Dict , ) -> Optional[int]:
'''simple docstring'''
_a : str =vocab_size
_a : List[str] =decoder_vocab_size or vocab_size
_a : Optional[Any] =max_position_embeddings
_a : Dict =d_model
_a : List[str] =encoder_ffn_dim
_a : Any =encoder_layers
_a : str =encoder_attention_heads
_a : Optional[Any] =decoder_ffn_dim
_a : Optional[Any] =decoder_layers
_a : Dict =decoder_attention_heads
_a : str =dropout
_a : str =attention_dropout
_a : str =activation_dropout
_a : Tuple =activation_function
_a : List[str] =init_std
_a : List[Any] =encoder_layerdrop
_a : Dict =decoder_layerdrop
_a : Tuple =use_cache
_a : List[Any] =encoder_layers
_a : Optional[Any] =scale_embedding # scale factor will be sqrt(d_model) if True
_a : Any =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , forced_eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
class A__ ( UpperCAmelCase__ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_a : Optional[Any] =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_a : Any ={0: """batch"""}
_a : Tuple ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
_a : str ={0: """batch""", 1: """decoder_sequence"""}
_a : Any ={0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a : str =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
_a , _a : Dict =self.num_layers
for i in range(SCREAMING_SNAKE_CASE ):
_a : Optional[int] ={0: """batch""", 2: """past_sequence + sequence"""}
_a : List[Any] ={0: """batch""", 2: """past_sequence + sequence"""}
else:
_a : str =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCAmelCase ( self :int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_a : Optional[int] =super().outputs
else:
_a : Optional[int] =super(SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
_a , _a : Optional[int] =self.num_layers
for i in range(SCREAMING_SNAKE_CASE ):
_a : Tuple ={0: """batch""", 2: """past_sequence + sequence"""}
_a : List[str] ={0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_a : Optional[Any] =self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Generate decoder inputs
_a : List[Any] =seq_length if not self.use_past else 1
_a : Union[str, Any] =self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] ={f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
_a : Union[str, Any] =dict(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_a , _a : Any =common_inputs["""input_ids"""].shape
_a : Any =common_inputs["""decoder_input_ids"""].shape[1]
_a , _a : List[str] =self.num_attention_heads
_a : Union[str, Any] =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a : Union[str, Any] =decoder_seq_length + 3
_a : List[Any] =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a : List[Any] =torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )] , dim=1 )
_a : List[str] =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a : List[Any] =self.num_layers
_a : Any =min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : List[Any] =max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - min_num_layers
_a : List[str] ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
_a : Dict =encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) )
return common_inputs
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_a : Optional[Any] =self._generate_dummy_inputs_for_encoder_and_decoder(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
_a , _a : Union[str, Any] =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
_a : Union[str, Any] =seqlen + 2
_a , _a : Union[str, Any] =self.num_layers
_a , _a : Tuple =self.num_attention_heads
_a : Any =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a : List[str] =common_inputs["""attention_mask"""].dtype
_a : Optional[int] =torch.cat(
[common_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
_a : List[Any] =[
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(SCREAMING_SNAKE_CASE )
]
return common_inputs
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a : Optional[Any] =compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a : Union[str, Any] =tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE )
_a : Any =compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
_a : Optional[Any] =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a : int =dict(tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE ) )
return common_inputs
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :int = -1 , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_a : Dict =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
else:
_a : Any =self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
return common_inputs
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_a : Any =super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
_a : Dict =super(SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1e-4
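# Worked example for the dummy past_key_values shapes built above (hypothetical
# sizes): with batch=2, encoder_attention_heads=16, encoder_seq_length=8 and
# d_model=1024, each encoder past tensor is
#     (batch, heads, seq, d_model // heads) = (2, 16, 8, 64)
# while the decoder tensors use decoder_seq_length + 3 in the sequence slot.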
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
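# Usage sketch (this class corresponds to transformers' MarkupLMConfig; given the
# defaults in the signature above, tag_pad_id is 216 and subs_pad_id is 1001):
#
# from transformers import MarkupLMConfig, MarkupLMModel
# config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
# model = MarkupLMModel(config)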
| 694 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
A__: Union[str, Any] = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , *SCREAMING_SNAKE_CASE :int , **SCREAMING_SNAKE_CASE :str ) -> None:
'''simple docstring'''
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 694 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint: dict, hf_model: SpeechT5HifiGan, config: SpeechT5HifiGanConfig) -> None:
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[F"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["""model"""]["""generator"""], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
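    # Example invocation of this script (file names are placeholders):
    #
    # python convert_hifigan_checkpoint.py \
    #     --checkpoint_path ./hifigan/checkpoint.pt \
    #     --stats_path ./hifigan/stats.npy \
    #     --pytorch_dump_folder_path ./speecht5_hifigan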
| 694 | 1 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A__: Any = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class A__ ( UpperCAmelCase__ ):
def __init__( self :Union[str, Any] , *SCREAMING_SNAKE_CASE :Tuple , **SCREAMING_SNAKE_CASE :int ) -> int:
'''simple docstring'''
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Tuple=None , **SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
_a , _a : Tuple ={}, {}
if padding is not None:
_a : Optional[Any] =padding
if truncation is not None:
_a : List[Any] =truncation
if top_k is not None:
_a : Optional[int] =top_k
return preprocess_params, {}, postprocess_params
def __call__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Union["Image.Image", str] , SCREAMING_SNAKE_CASE :str = None , **SCREAMING_SNAKE_CASE :str ) -> Tuple:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : List[str] ={"""image""": image, """question""": question}
else:
_a : int =image
_a : str =super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return results
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :Dict=False ) -> Dict:
'''simple docstring'''
_a : Any =load_image(inputs["""image"""] )
_a : Optional[Any] =self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
_a : int =self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(SCREAMING_SNAKE_CASE )
return model_inputs
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[str] ) -> str:
'''simple docstring'''
_a : Optional[int] =self.model(**SCREAMING_SNAKE_CASE )
return model_outputs
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Any=5 ) -> List[str]:
'''simple docstring'''
if top_k > self.model.config.num_labels:
_a : List[Any] =self.model.config.num_labels
if self.framework == "pt":
_a : List[Any] =model_outputs.logits.sigmoid()[0]
_a , _a : Dict =probs.topk(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
_a : Union[str, Any] =scores.tolist()
_a : Optional[Any] =ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
| 694 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
if sources is int:
_a : Tuple =[sources]
if sinks is int:
_a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 694 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
def __init__( self :Tuple , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , num_labels=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ) -> Optional[int]:
'''simple docstring'''
self.parent =parent
self.batch_size =batch_size
self.image_size =image_size
self.num_channels =num_channels
self.num_stages =num_stages
self.hidden_sizes =hidden_sizes
self.depths =depths
self.is_training =is_training
self.use_labels =use_labels
self.intermediate_size =intermediate_size
self.hidden_act =hidden_act
self.num_labels =num_labels
self.initializer_range =initializer_range
self.out_features =out_features
self.out_indices =out_indices
self.scope =scope
def prepare_config_and_inputs( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
pixel_values =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels =None
if self.use_labels:
labels =ids_tensor([self.batch_size] , self.num_labels )
config =self.get_config()
return config, pixel_values, labels
def get_config( self :List[Any] ) -> Any:
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def create_and_check_model( self :int , config , pixel_values , labels ) -> List[Any]:
'''simple docstring'''
model =ConvNextVaModel(config=config )
model.to(torch_device )
model.eval()
result =model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def create_and_check_for_image_classification( self :str , config , pixel_values , labels ) -> str:
'''simple docstring'''
model =ConvNextVaForImageClassification(config )
model.to(torch_device )
model.eval()
result =model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def create_and_check_backbone( self :Dict , config , pixel_values , labels ) -> Optional[Any]:
'''simple docstring'''
model =ConvNextVaBackbone(config=config )
model.to(torch_device )
model.eval()
result =model(pixel_values )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
config.out_features =None
model =ConvNextVaBackbone(config=config )
model.to(torch_device )
model.eval()
result =model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def prepare_config_and_inputs_for_common( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
config , pixel_values , labels =config_and_inputs
inputs_dict ={"""pixel_values""": pixel_values}
return config, inputs_dict
def prepare_config_and_inputs_with_labels( self :List[str] ) -> Optional[int]:
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
config , pixel_values , labels =config_and_inputs
inputs_dict ={"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Tuple = False
__UpperCamelCase : Dict = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : str = False
__UpperCamelCase : List[str] = False
def setUp( self :List[Any] ) -> None:
'''simple docstring'''
self.model_tester =ConvNextVaModelTester(self )
self.config_tester =ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=3_7 )
def __UpperCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self :List[str] ) -> None:
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __UpperCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __UpperCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config , inputs_dict =self.model_tester.prepare_config_and_inputs_with_labels()
config.return_dict =True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
model =model_class(config )
model.to(torch_device )
model.train()
inputs =self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss =model(**inputs ).loss
loss.backward()
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
config , inputs_dict =self.model_tester.prepare_config_and_inputs_with_labels()
config.use_cache =False
config.return_dict =True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
model =model_class(config )
model.to(torch_device )
model.gradient_checkpointing_enable()
model.train()
inputs =self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss =model(**inputs ).loss
loss.backward()
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model =model_class(config )
signature =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names =[*signature.parameters.keys()]
expected_arg_names =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class ):
model =model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs =model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages =self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["""output_hidden_states"""] =True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states =True
check_hidden_states_output(inputs_dict , config , model_class )
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
config_and_inputs =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model =ConvNextVaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
image =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
model =ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device )
preprocessor =self.default_image_processor
image =prepare_img()
inputs =preprocessor(images=image , return_tensors="""pt""" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs =model(**inputs )
# verify the logits
expected_shape =torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice =torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 694 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 1 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ):
return getitem, k
def _set( k , v ):
return setitem, k, v
def _del( k ):
return delitem, k
def _run_operation( obj , fun , *args ):
try:
return fun(obj , *args ), None
except Exception as e:
return None, e
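# Each scenario below is a list of (function, *args) tuples that gets replayed
# against both a plain dict and a HashMap, so their observable behaviour can be
# compared step by step.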
A__: int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
A__: int = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
A__: List[str] = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
A__: List[str] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
A__: Dict = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
A__: List[str] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" ,(
pytest.param(_add_items ,id="""add items""" ),
pytest.param(_overwrite_items ,id="""overwrite items""" ),
pytest.param(_delete_items ,id="""delete items""" ),
pytest.param(_access_absent_items ,id="""access absent items""" ),
pytest.param(_add_with_resize_up ,id="""add with resize up""" ),
pytest.param(_add_with_resize_down ,id="""add with resize down""" ),
) ,)
def test_hash_map_is_the_same_as_dict( operations ) -> None:
my =HashMap(initial_block_size=4 )
py ={}
for _, (fun, *args) in enumerate(operations ):
my_res , my_exc =_run_operation(my , fun , *args )
py_res , py_exc =_run_operation(py , fun , *args )
assert my_res == py_res
assert str(my_exc ) == str(py_exc )
assert set(my ) == set(py )
assert len(my ) == len(py )
assert set(my.items() ) == set(py.items() )
def test_no_new_methods_was_added_to_api( ) -> None:
def is_public(name : str ) -> bool:
return not name.startswith("""_""" )
dict_public_names ={name for name in dir({} ) if is_public(name )}
hash_public_names ={name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
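# Worked examples: price_plus_tax(100, 0.25) -> 125.0 and price_plus_tax(125.50, 0.05) -> 131.775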
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 1 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abca = [0, 25, 50]
abcb = [25, 50, 75]
young = fuzz.membership.trimf(X, abca)
middle_aged = fuzz.membership.trimf(X, abcb)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 694 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester( unittest.TestCase ):
def __init__( self :List[Any] , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Tuple:
'''simple docstring'''
self.parent =parent
self.batch_size =batch_size
self.seq_length =seq_length
self.is_training =is_training
self.use_attention_mask =use_attention_mask
self.use_token_type_ids =use_token_type_ids
self.use_labels =use_labels
self.vocab_size =vocab_size
self.hidden_size =hidden_size
self.num_hidden_layers =num_hidden_layers
self.num_attention_heads =num_attention_heads
self.intermediate_size =intermediate_size
self.hidden_act =hidden_act
self.hidden_dropout_prob =hidden_dropout_prob
self.attention_probs_dropout_prob =attention_probs_dropout_prob
self.max_position_embeddings =max_position_embeddings
self.type_vocab_size =type_vocab_size
self.type_sequence_label_size =type_sequence_label_size
self.initializer_range =initializer_range
self.num_choices =num_choices
def prepare_config_and_inputs( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask =None
if self.use_attention_mask:
attention_mask =random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids =None
if self.use_token_type_ids:
token_type_ids =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common( self :Optional[Any] ) -> int:
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask =config_and_inputs
inputs_dict ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def prepare_config_and_inputs_for_decoder( self :int ) -> str:
'''simple docstring'''
config_and_inputs =self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask =config_and_inputs
config.is_decoder =True
encoder_hidden_states =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest( FlaxModelTesterMixin , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = True
all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def setUp( self :List[str] ) -> None:
'''simple docstring'''
self.model_tester =FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCAmelCase ( self :str ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
model =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
outputs =model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Any ) -> str:
'''simple docstring'''
model =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
input_ids =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
output =model(input_ids )[0]
expected_shape =[1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , expected_shape )
# compare the actual values for a slice.
expected_slice =np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :int ) -> int:
'''simple docstring'''
model =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
input_ids =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
output =model(input_ids )[0]
# compare the actual values for a slice.
expected_slice =np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A__ :
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
_a : Union[str, Any] =num_of_nodes
_a : list[list[int]] =[]
_a : dict[int, int] ={}
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :int ) -> int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
_a : Union[str, Any] =self.find_component(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :list[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
_a : Union[str, Any] =v_node
component_size[v_node] += component_size[u_node]
self.set_component(SCREAMING_SNAKE_CASE )
elif component_size[u_node] >= component_size[v_node]:
_a : Union[str, Any] =self.find_component(SCREAMING_SNAKE_CASE )
component_size[u_node] += component_size[v_node]
self.set_component(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int ) -> None:
'''simple docstring'''
_a : Tuple =[]
_a : int =0
_a : list[Any] =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_a : Optional[int] =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_a , _a , _a : List[str] =edge
_a : Any =self.m_component[u]
_a : str =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_a : Optional[Any] =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a , _a , _a : Dict =edge
_a : Union[str, Any] =self.m_component[u]
_a : str =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
_a : Optional[int] =[-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE_ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A__: Tuple = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
return field(default_factory=lambda: default , metadata=metadata )
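# Helper so dataclass fields can default to a (mutable) list: default_factory
# avoids the shared-mutable-default pitfall while still attaching the metadata dict.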
@dataclass
class BenchmarkArguments:
models : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__UpperCamelCase : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
multi_process : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__UpperCamelCase : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
__UpperCamelCase : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__UpperCamelCase : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
__UpperCamelCase : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
__UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __post_init__( self :Union[str, Any] ) -> None:
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 694 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
A__: Any = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path : str ,pytorch_dump_folder_path : str ) -> None:
bort_4_8_768_1024_hparams ={
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
predefined_args =bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
encoder =BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] ,num_layers=predefined_args["""num_layers"""] ,units=predefined_args["""units"""] ,hidden_size=predefined_args["""hidden_size"""] ,max_length=predefined_args["""max_length"""] ,num_heads=predefined_args["""num_heads"""] ,scaled=predefined_args["""scaled"""] ,dropout=predefined_args["""dropout"""] ,output_attention=False ,output_all_encodings=False ,use_residual=predefined_args["""use_residual"""] ,activation=predefined_args.get("""activation""" ,"""gelu""" ) ,layer_norm_eps=predefined_args.get("""layer_norm_eps""" ,None ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_a : Optional[int] ="""openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
_a : Any =os.path.join(get_home_dir() ,"""models""" )
_a : List[Any] =_load_vocab(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,cls=_UpperCAmelCase )
_a : str =nlp.model.BERTModel(
_UpperCAmelCase ,len(_UpperCAmelCase ) ,units=predefined_args["""units"""] ,embed_size=predefined_args["""embed_size"""] ,embed_dropout=predefined_args["""embed_dropout"""] ,word_embed=predefined_args["""word_embed"""] ,use_pooler=_UpperCAmelCase ,use_token_type_embed=_UpperCAmelCase ,token_type_vocab_size=predefined_args["""token_type_vocab_size"""] ,use_classifier=_UpperCAmelCase ,use_decoder=_UpperCAmelCase ,)
original_bort.load_parameters(_UpperCAmelCase ,cast_dtype=_UpperCAmelCase ,ignore_extra=_UpperCAmelCase )
_a : Optional[int] =original_bort._collect_params_with_prefix()
# Build our config 🤗
hf_bort_config_json ={
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.0_2,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(_UpperCAmelCase ),
}
hf_bort_config =BertConfig.from_dict(hf_bort_config_json )
hf_bort_model =BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(mx_array ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(hf_param , gluon_param ):
shape_hf =hf_param.shape
gluon_param =to_torch(params[gluon_param] )
shape_gluon =gluon_param.shape
assert (
shape_hf == shape_gluon
), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
_a : Optional[Any] =check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,"""word_embed.0.weight""" )
_a : Optional[int] =check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,"""encoder.position_weight""" )
_a : List[Any] =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,"""encoder.layer_norm.beta""" )
_a : Any =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,"""encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
_a : int =torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
_a : BertLayer =hf_bort_model.bert.encoder.layer[i]
# self attention
_a : BertSelfAttention =layer.attention.self
_a : Union[str, Any] =check_and_map_params(
self_attn.key.bias.data ,F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
_a : Optional[int] =check_and_map_params(
self_attn.key.weight.data ,F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
_a : Optional[Any] =check_and_map_params(
self_attn.query.bias.data ,F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
_a : List[str] =check_and_map_params(
self_attn.query.weight.data ,F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
_a : str =check_and_map_params(
self_attn.value.bias.data ,F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
_a : int =check_and_map_params(
self_attn.value.weight.data ,F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
_a : BertSelfOutput =layer.attention.output
_a : List[Any] =check_and_map_params(
self_output.dense.bias ,F"encoder.transformer_cells.{i}.proj.bias" )
_a : Optional[Any] =check_and_map_params(
self_output.dense.weight ,F"encoder.transformer_cells.{i}.proj.weight" )
_a : List[Any] =check_and_map_params(
self_output.LayerNorm.bias ,F"encoder.transformer_cells.{i}.layer_norm.beta" )
_a : str =check_and_map_params(
self_output.LayerNorm.weight ,F"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
_a : BertIntermediate =layer.intermediate
_a : Optional[Any] =check_and_map_params(
intermediate.dense.bias ,F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
_a : str =check_and_map_params(
intermediate.dense.weight ,F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
_a : BertOutput =layer.output
_a : Optional[int] =check_and_map_params(
bert_output.dense.bias ,F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
_a : int =check_and_map_params(
bert_output.dense.weight ,F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
_a : Optional[int] =check_and_map_params(
bert_output.LayerNorm.bias ,F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
_a : Tuple =check_and_map_params(
bert_output.LayerNorm.weight ,F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
tokenizer =RobertaTokenizer.from_pretrained("""roberta-base""" )
input_ids =tokenizer.encode_plus(SAMPLE_TEXT )["""input_ids"""]
# Get gluon output
gluon_input_ids =mx.nd.array([input_ids] )
output_gluon =original_bort(inputs=gluon_input_ids ,token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path )
hf_bort_model =BertModel.from_pretrained(pytorch_dump_folder_path )
hf_bort_model.eval()
input_ids =tokenizer.encode_plus(SAMPLE_TEXT ,return_tensors="""pt""" )
output_hf =hf_bort_model(**input_ids )[0]
gluon_layer =output_gluon[0].asnumpy()
hf_layer =output_hf[0].detach().numpy()
max_absolute_diff =np.max(np.abs(hf_layer - gluon_layer ) ).item()
success =np.allclose(gluon_layer ,hf_layer ,atol=1e-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" ,_UpperCAmelCase )
if __name__ == "__main__":
A__: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution ):
def __init__( self :List[str] , base_distribution : Distribution , loc=None , scale=None , event_dim=0 ) -> None:
'''simple docstring'''
self.scale =1.0 if scale is None else scale
self.loc =0.0 if loc is None else loc
super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
@property
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class ParameterProjection( nn.Module ):
def __init__( self :Optional[int] , in_features : int , args_dim : Dict[str, int] , domain_map : Callable[..., Tuple[torch.Tensor]] , **kwargs ) -> None:
'''simple docstring'''
super().__init__(**kwargs )
self.args_dim =args_dim
self.proj =nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
self.domain_map =domain_map
def forward( self :List[Any] , x : torch.Tensor ) -> Tuple[torch.Tensor]:
'''simple docstring'''
params_unbounded =[proj(x ) for proj in self.proj]
return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
def __init__( self :Dict , function ) -> None:
'''simple docstring'''
super().__init__()
self.function =function
def forward( self :Any , x , *args ) -> Any:
'''simple docstring'''
return self.function(x , *args )
class DistributionOutput:
distribution_class : type
in_features : int
args_dim : Dict[str, int]
def __init__( self :Any , dim : int = 1 ) -> None:
'''simple docstring'''
self.dim =dim
self.args_dim ={k: dim * self.args_dim[k] for k in self.args_dim}
def _base_distribution( self :Optional[int] , distr_args ) -> Distribution:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*distr_args )
else:
return Independent(self.distribution_class(*distr_args ) , 1 )
def distribution( self :Optional[int] , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None , ) -> Distribution:
'''simple docstring'''
distr =self._base_distribution(distr_args )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def __UpperCAmelCase ( self :Any ) -> float:
'''simple docstring'''
return 0.0
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def domain_map( self :int , *args : torch.Tensor ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def squareplus( x : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
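# squareplus(x) = (x + sqrt(x^2 + 4)) / 2 is a smooth, softplus-like map onto the
# positive reals; the subclasses below use it to keep scale / df / total_count positive.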
class StudentTOutput( DistributionOutput ):
args_dim : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
distribution_class : type = StudentT
@classmethod
def domain_map( cls :int , df : torch.Tensor , loc : torch.Tensor , scale : torch.Tensor ) -> Union[str, Any]:
'''simple docstring'''
scale =cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
df =2.0 + cls.squareplus(df )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput( DistributionOutput ):
args_dim : Dict[str, int] = {"loc": 1, "scale": 1}
distribution_class : type = Normal
@classmethod
def domain_map( cls :List[Any] , loc : torch.Tensor , scale : torch.Tensor ) -> Dict:
'''simple docstring'''
scale =cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput( DistributionOutput ):
args_dim : Dict[str, int] = {"total_count": 1, "logits": 1}
distribution_class : type = NegativeBinomial
@classmethod
def domain_map( cls :List[Any] , total_count : torch.Tensor , logits : torch.Tensor ) -> Optional[int]:
'''simple docstring'''
total_count =cls.squareplus(total_count )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _base_distribution( self :Optional[Any] , distr_args ) -> Distribution:
'''simple docstring'''
total_count , logits =distr_args
if self.dim == 1:
return self.distribution_class(total_count=total_count , logits=logits )
else:
return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
def distribution( self :int , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None ) -> Distribution:
'''simple docstring'''
total_count , logits =distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 694 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
A__: str = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = FunnelTokenizer
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
cls_token_type_id : int = 2
def __init__( self :Any , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ) -> None:
'''simple docstring'''
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
normalizer_state =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class =getattr(normalizers , normalizer_state.pop("""type""" ) )
normalizer_state["""lowercase"""] =do_lower_case
normalizer_state["""strip_accents"""] =strip_accents
normalizer_state["""handle_chinese_chars"""] =tokenize_chinese_chars
self.backend_tokenizer.normalizer =normalizer_class(**normalizer_state )
self.do_lower_case =do_lower_case
def build_inputs_with_special_tokens( self :Optional[Any] , token_ids_a :List[int] , token_ids_b :Optional[List[int]]=None ) -> List[int]:
'''simple docstring'''
output =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self :Optional[int] , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
sep =[self.sep_token_id]
cls =[self.cls_token_id]
if token_ids_b is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_a : Dict =self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE )
return tuple(SCREAMING_SNAKE_CASE )
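# A minimal usage sketch (illustrative only; `from_pretrained` downloads one of the
# checkpoints listed above, and `cls_id`/`sep_id` stand for the real special-token ids):
# tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
# tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
# -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]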
| 694 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
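    # A few illustrative checks of the helpers above (0b1010 == 10):
    assert set_bit(0b1010, 0) == 0b1011
    assert clear_bit(0b1011, 0) == 0b1010
    assert flip_bit(0b1010, 1) == 0b1000
    assert is_bit_set(0b1010, 1) is True
    assert get_bit(0b1010, 0) == 0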
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
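    # Illustrative check: |1 - 1| + |1 - 2| + |1 - 3| == 3.
    assert manhattan_distance([1, 1, 1], [1, 2, 3]) == 3.0
    assert manhattan_distance_one_liner([1, 1, 1], [1, 2, 3]) == 3.0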
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
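if __name__ == "__main__":
    # A minimal usage sketch: with identity coefficients (a = b = [1, 0, 0]),
    # the filter passes samples through unchanged.
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    print([filt.process(x) for x in (1.0, 0.5, -0.25)])  # [1.0, 0.5, -0.25]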
| 694 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
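# A minimal usage sketch (illustrative only; downloads the checkpoint listed above):
# tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [sep_token_id, 5, 6]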
| 694 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
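    # Illustrative query on the sample graphs above: the cheapest E -> F route
    # is E -> G -> F with a total cost of 3.
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3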
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 694 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
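    # Illustrative check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
    assert solution(10) == 27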
print(solution(int(input('''Enter the Number: ''').strip())))
| 694 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 694 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
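    # A second illustrative check: overlapping occurrences are reported too.
    assert naive_pattern_search("AAA", "AA") == [0, 1]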
| 694 | 1 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    # Split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
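# Example invocations (illustrative; run from the repository root):
#   python utils/custom_init_isort.py --check_only   # only report __init__.py files that need sorting
#   python utils/custom_init_isort.py                # rewrite the offending __init__.py files in place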
| 694 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
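    # Illustrative check: the mean of [1, 2, 3, 4] is 2.5.
    assert mean([1, 2, 3, 4]) == 2.5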
| 694 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
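    # Illustrative checks: 9 can be formed as 4 + 5 from [3, 34, 4, 12, 5, 2],
    # while 30 cannot be formed from any subset.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)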
| 694 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
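# A minimal usage sketch (illustrative only; loads the default checkpoint above on
# first call, which requires network access):
# tool = TextSummarizationTool()
# summary = tool("A long chat transcript to condense ...")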
| 694 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
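    # Illustrative check: the even Fibonacci terms up to 100 are 2, 8, 34 -> 44.
    assert solution(100) == 44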
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
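    # Illustrative check: F(12) = 144 is the first Fibonacci number with 3 digits.
    assert solution(3) == 12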
print(solution(int(str(input()).strip())))
| 694 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
A__: Dict = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seqaseq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seqaseq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seqaseq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :float = 3e-3 , SCREAMING_SNAKE_CASE :str = "adafactor" , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :int = 0 , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :int = None , ) -> Union[str, Any]:
'''simple docstring'''
_a : List[Any] =self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
_a : Optional[int] =self.get_auto_remove_tmp_dir()
        _a : Tuple =f"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length 128\n            --max_target_length 128\n            --do_train\n            --num_train_epochs {str(SCREAMING_SNAKE_CASE )}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(SCREAMING_SNAKE_CASE )}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        _a : Union[str, Any] =f"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length 128\n            --evaluation_strategy steps\n            --eval_steps {str(SCREAMING_SNAKE_CASE )}\n        ".split()
_a : Dict ="""
--do_predict
""".split()
_a : str =[]
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_a : List[str] =get_gpu_count()
_a : Any =get_torch_dist_unique_port()
_a : str =f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_a : Union[str, Any] =[sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=self.get_env() )
else:
_a : Any =["""run_translation.py"""] + args
with patch.object(SCREAMING_SNAKE_CASE , """argv""" , SCREAMING_SNAKE_CASE ):
main()
return output_dir
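# ---------------------------------------------------------------------------
# Hedged byte-accounting sketch for the ~150MB expectation asserted in the test
# above. The assumptions are mine, not the test's: plain Adam keeps two fp32
# moments per parameter (8 bytes total), bnb 8-bit Adam keeps two int8 moments
# (2 bytes total), and the 29M embedding parameters stay in fp32 either way.
def _optim_state_mb(n_params: int, bytes_per_param: int) -> float:
    """Optimizer-state footprint in MiB for `n_params` parameters."""
    return n_params * bytes_per_param / 2**20


if __name__ == "__main__":
    quantizable = 25_000_000  # 54M total minus 29M nn.Embedding
    adam_mb = _optim_state_mb(quantizable, 8)  # ~190 MiB
    bnb_mb = _optim_state_mb(quantizable, 2)  # ~48 MiB
    print(f"expected saving: ~{adam_mb - bnb_mb:.0f} MiB")  # ~143 MiB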
| 694 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ) -> str:
# Initialise PyTorch model
_a : List[str] =RemBertConfig.from_json_file(_UpperCAmelCase )
print("""Building PyTorch model from configuration: {}""".format(str(_UpperCAmelCase ) ) )
_a : Dict =RemBertModel(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(_UpperCAmelCase ) )
torch.save(model.state_dict() ,_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
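    # Hedged usage note: a typical invocation of this conversion script looks like
    # the following (all paths below are placeholders, not real checkpoints):
    #
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/rembert/model.ckpt \
    #       --rembert_config_file /path/to/rembert/config.json \
    #       --pytorch_dump_path /path/to/output/pytorch_model.bin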
| 694 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A__ ( UpperCAmelCase__ ):
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
_a : Tuple =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """tf_padding""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """depth_multiplier""" ) )
class A__ :
def __init__( self :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :str=1_3 , SCREAMING_SNAKE_CASE :Optional[int]=3 , SCREAMING_SNAKE_CASE :List[Any]=3_2 , SCREAMING_SNAKE_CASE :str=0.25 , SCREAMING_SNAKE_CASE :Optional[int]=8 , SCREAMING_SNAKE_CASE :Union[str, Any]=8 , SCREAMING_SNAKE_CASE :Optional[int]=6 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Tuple="relu6" , SCREAMING_SNAKE_CASE :str=1_2_8_0 , SCREAMING_SNAKE_CASE :Any=0.1 , SCREAMING_SNAKE_CASE :List[str]=0.02 , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :List[Any]=1_0 , SCREAMING_SNAKE_CASE :Union[str, Any]=None , ) -> Any:
'''simple docstring'''
_a : Dict =parent
_a : Union[str, Any] =batch_size
_a : Union[str, Any] =num_channels
_a : int =image_size
_a : str =depth_multiplier
_a : str =depth_divisible_by
_a : int =min_depth
_a : List[str] =expand_ratio
_a : str =tf_padding
_a : List[str] =output_stride
_a : Union[str, Any] =first_layer_is_expansion
_a : Any =finegrained_output
_a : Optional[Any] =hidden_act
_a : Tuple =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_a : List[Any] =classifier_dropout_prob
_a : str =use_labels
_a : List[str] =is_training
_a : Optional[int] =num_labels
_a : Any =initializer_range
_a : Union[str, Any] =scope
def __UpperCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
_a : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : List[str] =None
_a : List[Any] =None
if self.use_labels:
_a : Tuple =ids_tensor([self.batch_size] , self.num_labels )
_a : List[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : List[Any] =self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> str:
'''simple docstring'''
_a : Optional[int] =MobileNetVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : Dict =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :int ) -> List[str]:
'''simple docstring'''
_a : int =self.num_labels
_a : Tuple =MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : int =model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict ) -> Tuple:
'''simple docstring'''
_a : List[Any] =self.num_labels
_a : List[Any] =MobileNetVaForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
_a : int =model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_a : Dict =model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
_a : Optional[Any] =self.prepare_config_and_inputs()
_a , _a , _a , _a : Union[str, Any] =config_and_inputs
_a : int ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase : int = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : List[Any] = False
__UpperCamelCase : int = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : Union[str, Any] = False
def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
_a : Tuple =MobileNetVaModelTester(self )
_a : Any =MobileNetVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def __UpperCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
_a , _a : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict =model_class(SCREAMING_SNAKE_CASE )
_a : Any =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] =[*signature.parameters.keys()]
_a : Union[str, Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
_a : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple ):
_a : Dict =model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_a : Dict =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a : Dict =outputs.hidden_states
_a : str =1_6
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
_a , _a : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] =True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] =True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
_a : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
_a : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE )
@slow
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Union[str, Any] =MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
_a : List[str] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : Any =MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(SCREAMING_SNAKE_CASE )
_a : List[str] =self.default_image_processor
_a : List[str] =prepare_img()
_a : List[str] =image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_a : str =model(**SCREAMING_SNAKE_CASE )
# verify the logits
_a : Dict =torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
_a : Optional[int] =torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Tuple =MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
_a : Any =model.to(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
_a : str =prepare_img()
_a : List[str] =image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_a : Any =model(**SCREAMING_SNAKE_CASE )
_a : Dict =outputs.logits
# verify the logits
_a : List[str] =torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )
_a : int =torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
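# ---------------------------------------------------------------------------
# Hedged end-to-end sketch of the inference path the slow tests above exercise.
# It is illustrative usage, not part of the test suite: it assumes network
# access to the `google/mobilenet_v2_1.0_224` checkpoint, and it uses the
# upstream class name `MobileNetV2ForImageClassification` (aliased as
# `MobileNetVaForImageClassification` in this file).
def _classify_image(image_path: str) -> str:
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1001): ImageNet classes + background
    return model.config.id2label[int(logits.argmax(-1))]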
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
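# Hedged usage sketch for the tokenizer above (it assumes the `sentencepiece`
# package and network access to the `vinai/bartpho-syllable` checkpoint; the
# example sentence is the one used in the BARTpho documentation):
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
# ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
# print(tokenizer.convert_ids_to_tokens(ids))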
| 694 | 1 |
'''simple docstring'''
import random
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : float ,_UpperCAmelCase : bool = False ) -> dict:
_a : dict ={i: [] for i in range(_UpperCAmelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCAmelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is lower than the given probability
for i in range(_UpperCAmelCase ):
for j in range(i + 1 ,_UpperCAmelCase ):
if random.random() < probability:
graph[i].append(_UpperCAmelCase )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_UpperCAmelCase )
return graph
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> dict:
return {
i: [j for j in range(_UpperCAmelCase ) if i != j] for i in range(_UpperCAmelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
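    # Hedged usage sketch. Both functions above are named SCREAMING_SNAKE_CASE_ in
    # this file, so `random_graph` and `complete_graph` below are hypothetical
    # aliases for the first and second definition respectively:
    #   random.seed(1)
    #   print(random_graph(4, 0.5))   # adjacency lists drawn from the G(n=4, p=0.5) model
    #   print(complete_graph(3))      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}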
| 694 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
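# Hedged usage sketch for one of the helpers re-exported above:
# `find_executable_batch_size` retries the decorated function with a halved
# batch size whenever it raises a CUDA out-of-memory error.
# from accelerate.utils import find_executable_batch_size
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # must accept the injected batch_size as its first argument
#
# train()  # called without arguments; the decorator supplies batch_size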
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A__ :
def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :int = 6 ) -> None:
'''simple docstring'''
_a : Node | None =None
_a : Node | None =None
self.create_linked_list(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
_a : str =Node()
_a : Dict =current_node
_a : int =current_node
_a : str =current_node
for _ in range(1 , SCREAMING_SNAKE_CASE ):
_a : Any =Node()
_a : int =current_node
_a : Optional[Any] =previous_node
_a : int =current_node
_a : int =self.front
_a : List[Any] =previous_node
def __UpperCAmelCase ( self :List[str] ) -> bool:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __UpperCAmelCase ( self :str ) -> Any | None:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> None:
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
_a : str =self.rear.next
if self.rear:
_a : Optional[Any] =data
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
_a : Dict =self.front.data
_a : str =None
return data
_a : List[Any] =self.front
_a : List[str] =old_front.next
_a : Optional[Any] =old_front.data
_a : List[str] =None
return data
def __UpperCAmelCase ( self :Optional[int] ) -> None:
'''simple docstring'''
if self.is_empty():
raise Exception("""Empty Queue""" )
def __UpperCAmelCase ( self :Any ) -> None:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class A__ :
def __init__( self :Union[str, Any] ) -> None:
'''simple docstring'''
_a : Any | None =None
_a : Node | None =None
_a : Node | None =None
if __name__ == "__main__":
import doctest
doctest.testmod()
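# For contrast, a hedged fixed-capacity queue sketch built on the standard
# library. `collections.deque(maxlen=...)` silently drops the oldest element on
# overflow, so the "Full Queue" behaviour of the class above is made explicit:
from collections import deque


class BoundedQueue:
    def __init__(self, capacity: int = 6) -> None:
        self._items: deque = deque()
        self._capacity = capacity

    def enqueue(self, item) -> None:
        if len(self._items) == self._capacity:
            raise Exception("Full Queue")
        self._items.append(item)

    def dequeue(self):
        if not self._items:
            raise Exception("Empty Queue")
        return self._items.popleft()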
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]:
_a : Dict =current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
_a : Any =row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
_a : Any =column
continue
_a : Union[str, Any] =column / magnitude
# Subtract to cancel term
_a : Optional[Any] =current_set[0]
_a : List[Any] =[first_row]
_a : Tuple =current_set[1::]
for row in current_set:
_a : Any =[]
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a : List[str] =final_set[0]
_a : Tuple =[]
_a : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a : str =simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,_UpperCAmelCase )
_a : List[Any] =resultant
return final_set
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list:
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_a : str =len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_a : str =equations.copy()
if any(0 in row for row in data_set ):
_a : Optional[int] =data_set.copy()
_a : str =[]
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
_a : List[Any] =data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 ,_UpperCAmelCase )
_a : Dict =data_set.copy()
_a : Any =simplify(_UpperCAmelCase )
_a : Any =simplified[::-1]
_a : list =[]
for row in simplified:
_a : Optional[Any] =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
_a : List[str] =temp_row[1::]
_a : int =temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
_a : Tuple =[]
for item in solutions:
final.append(float(round(_UpperCAmelCase ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A__: int = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
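    # Hedged cross-check of the 5x5 demo system above (assigned to A__ but
    # referenced as `eq` in the prints) using numpy, an extra dependency the
    # pure-Python solver does not need:
    #   import numpy as np
    #   a = np.array([row[:-1] for row in eq], dtype=float)
    #   b = np.array([row[-1] for row in eq], dtype=float)
    #   print(np.linalg.solve(a, b))  # expected: [-1.  0.  1.  2.  3.]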
| 694 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
A__: List[Any] = logging.get_logger(__name__)
set_seed(770)
A__: int = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
A__: str = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
A__: str = os.path.dirname(os.path.abspath(__file__))
A__: List[str] = os.path.join(os.path.expanduser('''~'''), '''.cache''')
A__: List[str] = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any]=False ) -> int:
_a : List[str] =model_type
if use_small:
key += "_small"
return os.path.join(_UpperCAmelCase ,REMOTE_MODEL_PATHS[key]["""file_name"""] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Tuple ) -> List[Any]:
os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
hf_hub_download(repo_id=_UpperCAmelCase ,filename=_UpperCAmelCase ,local_dir=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Optional[int]=False ,_UpperCAmelCase : str="text" ) -> Any:
if model_type == "text":
_a : List[Any] =BarkSemanticModel
_a : List[str] =BarkSemanticConfig
_a : Any =BarkSemanticGenerationConfig
elif model_type == "coarse":
_a : Optional[Any] =BarkCoarseModel
_a : Optional[Any] =BarkCoarseConfig
_a : Any =BarkCoarseGenerationConfig
elif model_type == "fine":
_a : Any =BarkFineModel
_a : str =BarkFineConfig
_a : List[str] =BarkFineGenerationConfig
else:
raise NotImplementedError()
_a : Optional[Any] =F"{model_type}_small" if use_small else model_type
_a : Optional[int] =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_UpperCAmelCase ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info["""repo_id"""] ,model_info["""file_name"""] )
_a : Dict =torch.load(_UpperCAmelCase ,map_location=_UpperCAmelCase )
# this is a hack
_a : Dict =checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
_a : Dict =model_args["""vocab_size"""]
_a : List[Any] =model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_a : Optional[Any] =model_args.pop("""n_head""" )
_a : str =model_args.pop("""n_embd""" )
_a : Union[str, Any] =model_args.pop("""n_layer""" )
_a : Any =ConfigClass(**checkpoint["""model_args"""] )
_a : Union[str, Any] =ModelClass(config=_UpperCAmelCase )
_a : Union[str, Any] =GenerationConfigClass()
_a : Any =model_generation_config
_a : Dict =checkpoint["""model"""]
# fixup checkpoint
_a : Optional[int] ="""_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(_UpperCAmelCase ):
# replace part of the key with corresponding layer name in HF implementation
_a : Any =k[len(_UpperCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
_a : Optional[Any] =new_k.replace(_UpperCAmelCase ,new_layer_name_dict[old_layer_name] )
_a : Optional[Any] =state_dict.pop(_UpperCAmelCase )
_a : Dict =set(state_dict.keys() ) - set(model.state_dict().keys() )
_a : str ={k for k in extra_keys if not k.endswith(""".attn.bias""" )}
_a : List[str] =set(model.state_dict().keys() ) - set(state_dict.keys() )
_a : Optional[Any] ={k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(_UpperCAmelCase ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(_UpperCAmelCase ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(_UpperCAmelCase ,strict=_UpperCAmelCase )
_a : List[str] =model.num_parameters(exclude_embeddings=_UpperCAmelCase )
_a : str =checkpoint["""best_val_loss"""].item()
logger.info(F"model loaded: {round(n_params/1e6 ,1 )}M params, {round(_UpperCAmelCase ,3 )} loss" )
model.eval()
model.to(_UpperCAmelCase )
del checkpoint, state_dict
return model
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Optional[Any]=False ,_UpperCAmelCase : Optional[int]="text" ) -> Optional[int]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_a : str ="""cpu""" # do conversion on cpu
_a : Any =_get_ckpt_path(_UpperCAmelCase ,use_small=_UpperCAmelCase )
_a : str =_load_model(_UpperCAmelCase ,_UpperCAmelCase ,model_type=_UpperCAmelCase ,use_small=_UpperCAmelCase )
# load bark initial model
_a : Dict =_bark_load_model(_UpperCAmelCase ,"""cpu""" ,model_type=_UpperCAmelCase ,use_small=_UpperCAmelCase )
if model_type == "text":
_a : Dict =bark_model["""model"""]
if model.num_parameters(exclude_embeddings=_UpperCAmelCase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
_a : Optional[Any] =5
_a : Union[str, Any] =10
if model_type in ["text", "coarse"]:
_a : Optional[Any] =torch.randint(256 ,(batch_size, sequence_length) ,dtype=torch.int )
_a : int =bark_model(_UpperCAmelCase )[0]
_a : Optional[int] =model(_UpperCAmelCase )
# take last logits
_a : Dict =output_new_model_total.logits[:, [-1], :]
else:
_a : Any =3
_a : Optional[int] =8
_a : Any =torch.randint(256 ,(batch_size, sequence_length, n_codes_total) ,dtype=torch.int )
_a : Optional[int] =model(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[Any] =bark_model(_UpperCAmelCase ,_UpperCAmelCase )
_a : str =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[int] ,) -> Optional[int]:
_a : Optional[Any] =os.path.join(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[Any] =BarkSemanticConfig.from_pretrained(os.path.join(_UpperCAmelCase ,"""config.json""" ) )
_a : Dict =BarkCoarseConfig.from_pretrained(os.path.join(_UpperCAmelCase ,"""config.json""" ) )
_a : List[Any] =BarkFineConfig.from_pretrained(os.path.join(_UpperCAmelCase ,"""config.json""" ) )
_a : str =EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
_a : Optional[Any] =BarkSemanticModel.from_pretrained(_UpperCAmelCase )
_a : int =BarkCoarseModel.from_pretrained(_UpperCAmelCase )
_a : Dict =BarkFineModel.from_pretrained(_UpperCAmelCase )
_a : int =EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
_a : Tuple =BarkConfig.from_sub_model_configs(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config ,coarseAcoustic.generation_config ,fineAcoustic.generation_config )
_a : List[str] =BarkModel(_UpperCAmelCase )
_a : List[Any] =semantic
_a : Any =coarseAcoustic
_a : Optional[int] =fineAcoustic
_a : Dict =codec
_a : str =bark_generation_config
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
bark.save_pretrained(_UpperCAmelCase ,repo_id=_UpperCAmelCase ,push_to_hub=_UpperCAmelCase )
if __name__ == "__main__":
A__: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
A__: List[Any] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
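    # Hedged usage sketch for a fully assembled checkpoint (it assumes the
    # combining step above was run and the result pushed, as with `suno/bark`):
    # from transformers import AutoProcessor, BarkModel
    #
    # processor = AutoProcessor.from_pretrained("suno/bark")
    # model = BarkModel.from_pretrained("suno/bark")
    # inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    # audio = model.generate(**inputs)  # waveform; rate in model.generation_config.sample_rate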
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
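# Hedged usage sketch: instantiating the configuration above (upstream name
# `MarkupLMConfig`) and a randomly initialized model, with no pretrained weights:
# from transformers import MarkupLMConfig, MarkupLMModel
#
# config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
# model = MarkupLMModel(config)
# print(config.hidden_size, config.max_xpath_tag_unit_embeddings)  # 768 256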
| 694 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A__: Tuple = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : int = ["pixel_values"]
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :int = 3_2 , SCREAMING_SNAKE_CASE :Any=PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :bool = True , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
_a : Union[str, Any] =do_resize
_a : Tuple =do_rescale
_a : Optional[Any] =size_divisor
_a : List[str] =resample
super().__init__(**SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE :Any ) -> np.ndarray:
'''simple docstring'''
_a , _a : List[Any] =get_image_size(SCREAMING_SNAKE_CASE )
# Rounds the height and width down to the closest multiple of size_divisor
_a : List[Any] =height // size_divisor * size_divisor
_a : str =width // size_divisor * size_divisor
_a : int =resize(SCREAMING_SNAKE_CASE , (new_h, new_w) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return image
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE :Dict ) -> np.ndarray:
'''simple docstring'''
return rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[Union[TensorType, str]] = None , SCREAMING_SNAKE_CASE :ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE :str , ) -> BatchFeature:
'''simple docstring'''
_a : Union[str, Any] =do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
_a : int =size_divisor if size_divisor is not None else self.size_divisor
_a : Optional[int] =resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
_a : Dict =make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
_a : List[Any] =[to_numpy_array(SCREAMING_SNAKE_CASE ) for img in images]
if do_resize:
_a : str =[self.resize(SCREAMING_SNAKE_CASE , size_divisor=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
_a : Optional[Any] =[self.rescale(SCREAMING_SNAKE_CASE , scale=1 / 2_5_5 ) for image in images]
_a : int =[to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
_a : str ={"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
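# Hedged numeric illustration of the `size_divisor` rounding performed in
# `resize` above: height and width are floored to the nearest multiple of
# `size_divisor` before the actual resize.
def _round_down_to_multiple(value: int, divisor: int = 3_2) -> int:
    return value // divisor * divisor


assert _round_down_to_multiple(5_1_8) == 5_1_2  # a 518-pixel side becomes 512
assert _round_down_to_multiple(6_3_9) == 6_0_8  # a 639-pixel side becomes 608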
| 694 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]:
if config_path is not None:
_a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase )
else:
_a : str =SpeechTaHifiGanConfig()
_a : Tuple =SpeechTaHifiGan(_UpperCAmelCase )
_a : int =torch.load(_UpperCAmelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =np.load(_UpperCAmelCase )
_a : Union[str, Any] =stats[0].reshape(-1 )
_a : Any =stats[1].reshape(-1 )
_a : Tuple =torch.from_numpy(_UpperCAmelCase ).float()
_a : List[str] =torch.from_numpy(_UpperCAmelCase ).float()
model.save_pretrained(_UpperCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__: Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
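    # Hedged usage sketch for a converted vocoder (`microsoft/speecht5_hifigan`
    # hosts one such conversion; the spectrogram here is random, just for shapes):
    # import torch
    # from transformers import SpeechT5HifiGan
    #
    # vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    # mel = torch.randn(100, 80)    # (frames, num_mel_bins)
    # with torch.no_grad():
    #     waveform = vocoder(mel)   # 1-D audio tensor at 16 kHz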
| 694 | 1 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Dict ,_UpperCAmelCase : str ) -> Dict:
# Initialise PyTorch model
_a : Any =TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_a : int =TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
A__: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
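    # Hedged follow-up: the dump directory written above can be reloaded directly
    # (the path is a placeholder):
    # from transformers import T5ForConditionalGeneration
    # model = T5ForConditionalGeneration.from_pretrained("/path/to/pytorch_dump")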
| 694 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
if sources is int:
_a : Tuple =[sources]
if sinks is int:
_a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
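    # --- Added cross-check (not part of the original file) ---
    # A minimal Edmonds-Karp max-flow over the same capacity matrix, as an
    # independent sanity check of the push-relabel result; for the 4-vertex
    # graph above both should report a maximum flow of 6.
    from collections import deque

    def edmonds_karp(capacity, source, sink):
        n = len(capacity)
        flow = [[0] * n for _ in range(n)]
        total = 0
        while True:
            # BFS for a shortest augmenting path in the residual graph
            parent = [-1] * n
            parent[source] = source
            queue = deque([source])
            while queue and parent[sink] == -1:
                u = queue.popleft()
                for v in range(n):
                    if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                        parent[v] = u
                        queue.append(v)
            if parent[sink] == -1:  # no augmenting path left
                return total
            # bottleneck capacity along the path found
            bottleneck, v = float("inf"), sink
            while v != source:
                bottleneck = min(bottleneck, capacity[parent[v]][v] - flow[parent[v]][v])
                v = parent[v]
            # augment the flow along that path
            v = sink
            while v != source:
                flow[parent[v]][v] += bottleneck
                flow[v][parent[v]] -= bottleneck
                v = parent[v]
            total += bottleneck

    print(f"edmonds-karp cross-check: {edmonds_karp(graph, 0, 3)}")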
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"{solution() = }")
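    # --- Added illustration (not in the original file) ---
    # The attack works because XOR is its own inverse: applying the same
    # repeating key twice returns the original bytes.
    demo_key = (ord("g"), ord("o"), ord("d"))
    demo_plain = [ord(ch) for ch in "the secret text"]
    demo_cipher = [byte ^ k for byte, k in zip(demo_plain, cycle(demo_key))]
    assert [byte ^ k for byte, k in zip(demo_cipher, cycle(demo_key))] == demo_plain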
| 694 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 694 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Input/output locations for the YOLO-format dataset (fill these in).
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
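# --- Added illustration (not part of the original script) ---
# Why update_image_and_anno only mirrors the x-center for flip_type == 1:
# YOLO boxes are normalized to [0, 1], so a horizontal mirror maps
# x_center -> 1 - x_center while class id, width and height stay put.
def _flip_box_horizontally(box: list) -> list:
    # e.g. [0, 0.25, 0.5, 0.1, 0.2] -> [0, 0.75, 0.5, 0.1, 0.2]
    class_id, x_center, y_center, width, height = box
    return [class_id, 1 - x_center, y_center, width, height]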
| 694 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
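    # --- Added sketch (hypothetical helper, not in the original file) ---
    # The inverse operation, recovering the net price from a gross total.
    def price_before_tax(total: float, tax_rate: float) -> float:
        return total / (1 + tax_rate)

    print(f"{price_before_tax(125.0, 0.25) = }")  # -> 100.0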
| 694 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
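    # --- Added usage example (not in the original file) ---
    # Integrate y' = y from x = 0 to 1 with y(0) = 1; explicit Euler
    # underestimates the exact answer e = 2.71828...
    ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.7048 with step_size = 0.01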
| 694 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =parent
_a : List[str] =batch_size
_a : List[str] =seq_length
_a : List[Any] =is_training
_a : Optional[int] =use_attention_mask
_a : List[Any] =use_token_type_ids
_a : List[Any] =use_labels
_a : Optional[Any] =vocab_size
_a : str =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Union[str, Any] =intermediate_size
_a : int =hidden_act
_a : List[str] =hidden_dropout_prob
_a : Optional[int] =attention_probs_dropout_prob
_a : Dict =max_position_embeddings
_a : Any =type_vocab_size
_a : str =type_sequence_label_size
_a : str =initializer_range
_a : List[str] =num_choices
def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict =None
if self.use_attention_mask:
_a : Any =random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[int] =None
if self.use_token_type_ids:
_a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Union[str, Any] =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
_a : Tuple =self.prepare_config_and_inputs()
_a , _a , _a , _a : List[Any] =config_and_inputs
_a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : List[Any] =self.prepare_config_and_inputs()
_a , _a , _a , _a : Optional[int] =config_and_inputs
_a : Tuple =True
_a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCAmelCase ( self :str ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Dict =model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCAmelCase ( self :Any ) -> str:
'''simple docstring'''
_a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Dict =model(SCREAMING_SNAKE_CASE )[0]
_a : List[Any] =[1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
_a : Any =np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :int ) -> int:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
_a : str =np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 694 | 1 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
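    # --- Added examples (not in the original file) ---
    # The result keeps the zero-padded width of the longer operand.
    print(binary_and(25, 32))  # 0b000000
    print(binary_and(37, 50))  # 0b100000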
| 694 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
__UpperCamelCase : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__UpperCamelCase : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__UpperCamelCase : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
__UpperCamelCase : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__UpperCamelCase : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
__UpperCamelCase : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
__UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
    def __post_init__(self):
'''simple docstring'''
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""",
            FutureWarning,
        )
    def to_json_string(self):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
    def model_names(self) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
    def do_multi_processing(self):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
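# --- Added usage sketch (not part of the original module) ---
# Assuming this dataclass is exposed as `BenchmarkArguments` together with
# transformers' HfArgumentParser, CLI flags map straight onto its fields:
#
#     from transformers import HfArgumentParser
#     args = HfArgumentParser(BenchmarkArguments).parse_args_into_dataclasses()[0]
#     print(args.to_json_string())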
| 694 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def __UpperCAmelCase ( self :int , **SCREAMING_SNAKE_CASE :int ) -> Tuple:
'''simple docstring'''
_a : Any ={
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""solver_type""": """bh2""",
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict=0 , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[str]:
'''simple docstring'''
_a : int =dict(self.forward_default_kwargs )
_a : Tuple =kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE )
_a : str =self.dummy_sample
_a : Dict =0.1 * sample
_a : Any =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : str =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : List[Any] =scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
_a : Optional[Any] =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
_a : List[str] =scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
_a : List[str] =dummy_past_residuals[: new_scheduler.config.solver_order]
_a , _a : Tuple =sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
_a : List[Any] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
_a : str =new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :int=0 , **SCREAMING_SNAKE_CASE :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : str =dict(self.forward_default_kwargs )
_a : List[Any] =kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE )
_a : Optional[int] =self.dummy_sample
_a : Union[str, Any] =0.1 * sample
_a : Optional[Any] =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : Tuple =self.get_scheduler_config()
_a : Optional[Any] =scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
_a : int =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
_a : Optional[int] =scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
_a : List[str] =dummy_past_residuals[: new_scheduler.config.solver_order]
_a : str =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
_a : List[Any] =new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :List[Any]=None , **SCREAMING_SNAKE_CASE :str ) -> Tuple:
'''simple docstring'''
if scheduler is None:
_a : Tuple =self.scheduler_classes[0]
_a : Tuple =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : int =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : Tuple =self.scheduler_classes[0]
_a : List[str] =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : Tuple =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =1_0
_a : Dict =self.dummy_model()
_a : Dict =self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_a : Any =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : List[Any] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
_a : Union[str, Any] =dict(self.forward_default_kwargs )
_a : List[Any] =kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
_a : str =self.get_scheduler_config()
_a : Tuple =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : int =self.dummy_sample
_a : Any =0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , """set_timesteps""" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , """set_timesteps""" ):
_a : List[str] =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a : str =[residual + 0.2, residual + 0.15, residual + 0.10]
_a : Optional[int] =dummy_past_residuals[: scheduler.config.solver_order]
_a : Any =scheduler.timesteps[5]
_a : Any =scheduler.timesteps[6]
_a : Dict =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
_a : Union[str, Any] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_a : Optional[Any] =UniPCMultistepScheduler(**self.get_scheduler_config() )
_a : List[str] =self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
_a : Any =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
_a : Union[str, Any] =DPMSolverSinglestepScheduler.from_config(scheduler.config )
_a : Dict =DEISMultistepScheduler.from_config(scheduler.config )
_a : List[str] =DPMSolverMultistepScheduler.from_config(scheduler.config )
_a : Optional[Any] =UniPCMultistepScheduler.from_config(scheduler.config )
_a : Optional[int] =self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
_a : str =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
_a : Optional[int] =self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __UpperCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __UpperCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
_a : List[str] =self.full_loop()
_a : str =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
_a : int =self.full_loop(prediction_type="""v_prediction""" )
_a : Tuple =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : Optional[int] =self.scheduler_classes[0]
_a : Union[str, Any] =self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
_a : Tuple =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : str =1_0
_a : str =self.dummy_model()
_a : Optional[Any] =self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_a : List[Any] =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[Any] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
def __UpperCAmelCase ( self :int , **SCREAMING_SNAKE_CASE :Tuple ) -> int:
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_a : Optional[Any] =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(
            base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)]
        )

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
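# --- Added illustration (not part of the original module) ---
# AffineTransformed above uses the same trick as a plain torch
# TransformedDistribution: samples become loc + scale * base_sample.
if __name__ == "__main__":
    base = Normal(torch.zeros(3), torch.ones(3))
    shifted = TransformedDistribution(base, [AffineTransform(loc=2.0, scale=5.0)])
    print(shifted.sample().shape)  # torch.Size([3])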
| 694 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[str]=None , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Optional[Any]="[UNK]" , SCREAMING_SNAKE_CASE :Union[str, Any]="[SEP]" , SCREAMING_SNAKE_CASE :str="[PAD]" , SCREAMING_SNAKE_CASE :Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE :List[Any]="[MASK]" , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :int=None , **SCREAMING_SNAKE_CASE :Dict , ) -> str:
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
_a : Optional[int] =getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
_a : Optional[int] =do_lower_case
_a : Optional[Any] =strip_accents
_a : str =tokenize_chinese_chars
_a : List[Any] =normalizer_class(**SCREAMING_SNAKE_CASE )
_a : int =do_lower_case
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , **SCREAMING_SNAKE_CASE :List[str] ) -> Optional[Any]:
'''simple docstring'''
_a : Any =PaddingStrategy.MAX_LENGTH
_a : List[Any] =text
_a : str =kwargs.pop("""text_pair""" , SCREAMING_SNAKE_CASE )
_a : Dict =kwargs.pop("""return_tensors""" , SCREAMING_SNAKE_CASE )
_a : int ={
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(SCREAMING_SNAKE_CASE ):
if batch_text_pair is not None:
_a : Optional[int] =batch_text_pair[idx]
else:
_a : List[Any] =None
_a : Dict =super().__call__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : Optional[int] =encoded_candidates.get("""input_ids""" )
_a : int =encoded_candidates.get("""attention_mask""" )
_a : int =encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(SCREAMING_SNAKE_CASE )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(SCREAMING_SNAKE_CASE )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(SCREAMING_SNAKE_CASE )
_a : Dict ={key: item for key, item in output_data.items() if len(SCREAMING_SNAKE_CASE ) != 0}
return BatchEncoding(SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str=None ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : Optional[Any] =[self.sep_token_id]
_a : str =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_a : str =self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE )
return tuple(SCREAMING_SNAKE_CASE )
| 694 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
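    # --- Added worked examples (not in the original file) ---
    print(set_bit(0b1001, 2))     # 0b1101 -> 13
    print(clear_bit(0b1101, 2))   # 0b1001 -> 9
    print(is_bit_set(0b1001, 3))  # True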
| 694 | 1 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
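    # Added worked check (not in the original): 10! = 3628800 -> digit sum 27.
    print(solution(10))  # 27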
| 694 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
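    # --- Added example (not in the original file) ---
    print(manhattan_distance([1, 1], [2, 2]))  # |1-2| + |1-2| = 2.0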
| 694 | 1 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
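    # Added worked check (not in the original): F(12) = 144 is the first
    # 3-digit Fibonacci number, so the index for 3 digits is 12.
    print(fibonacci_digits_index(3))  # 12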
| 694 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
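# --- Added usage example (not part of the original module) ---
# With the default coefficients (a = b = [1.0, 0.0, ...]) the filter is an
# identity system, so process() just echoes its input back.
if __name__ == "__main__":
    filt = IIRFilter(2)
    print([filt.process(x) for x in (0.0, 1.0, 0.5)])  # [0.0, 1.0, 0.5]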
| 694 | 1 |
'''simple docstring'''
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
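    # Added example (not in the original): proportional rows -> rank 1.
    print(rank_of_matrix([[1, 2], [2, 4]]))  # 1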
| 694 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
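    # --- Added check (not in the original file) ---
    # E -> G -> F costs 3, beating E -> B -> C -> D -> F at cost 4.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3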
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
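    # Added check (not in the original): merged [1, 2, 3, 4] -> median 2.5.
    print(median_of_two_arrays([1, 3], [2, 4]))  # 2.5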
| 694 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 694 | 1 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 694 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
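    # Added note (not in the original): matches may overlap.
    print(naive_pattern_search("AAA", "AA"))  # [0, 1]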
| 694 | 1 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Adams-Bashforth-style linear multistep combination of the running values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
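# --- Illustrative usage sketch (added; not part of the original file) -------
# Assumes the scheduler above is importable from the package; random tensors
# stand in for a real denoiser's predictions.
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       noise_pred = torch.randn_like(sample)  # placeholder model output
#       sample = scheduler.step(noise_pred, t, sample).prev_sample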
| 694 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple:
'''simple docstring'''
_a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8}
_a : int =parent
_a : Optional[int] =batch_size
_a : List[str] =num_channels
_a : Optional[Any] =image_size
_a : int =min_resolution
_a : str =max_resolution
_a : str =do_resize
_a : Tuple =size
_a : Tuple =do_normalize
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
_a : Any =ImageGPTImageProcessingTester(self )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
_a : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) )
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
_a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
_a : Dict =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) )
else:
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : List[Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Any =os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict()
_a : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict()
_a : Union[str, Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" )
_a : Dict =Image.open(dataset[4]["""file"""] )
_a : Optional[int] =Image.open(dataset[5]["""file"""] )
_a : Optional[Any] =[imagea, imagea]
return images
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_a : int =prepare_images()
# test non-batched
_a : Dict =image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
_a : Optional[int] =[3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE )
# test batched
_a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
_a : Any =[3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
| 694 | 1 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: trace the UNet once with a representative sample input
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
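# Example invocation (added for illustration; the script filename is assumed):
#   python stable_diffusion_ipex.py --dpm --steps 20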
| 694 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Bottom-up DP: subset[i][j] is True if some subset of arr[:i] sums to j."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
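    # Quick sanity checks (added for illustration):
    assert is_sum_subset([2, 4, 6, 8], 5) is False   # odd sum from even numbers
    assert is_sum_subset([2, 4, 6, 8], 14) is True   # 2 + 4 + 8 = 14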
| 694 | 1 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
if sources is int:
_a : Tuple =[sources]
if sinks is int:
_a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
| 694 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 1 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class A__ ( unittest.TestCase ):
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> List[str]:
'''simple docstring'''
_a : List[str] =3
_a : Optional[int] =2_5_0
_a : Union[str, Any] =ids_tensor((batch_size, length) , SCREAMING_SNAKE_CASE )
_a : List[str] =torch.ones((batch_size, length) , device=SCREAMING_SNAKE_CASE , dtype=torch.float ) / length
return input_ids, scores
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
_a , _a : Dict =self._get_tensors(5 )
_a : int =StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a , _a : List[str] =self._get_tensors(9 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a , _a : Any =self._get_tensors(1_0 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
_a : Dict =MaxLengthCriteria(max_length=1_0 )
_a , _a : Tuple =self._get_tensors(5 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a , _a : List[Any] =self._get_tensors(9 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a , _a : int =self._get_tensors(1_0 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
_a : Union[str, Any] =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_a , _a : Union[str, Any] =self._get_tensors(5 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a , _a : List[str] =self._get_tensors(9 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a , _a : List[Any] =self._get_tensors(1_0 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a : List[str] =StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def __UpperCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
_a , _a : str =self._get_tensors(5 )
_a : Union[str, Any] =MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_a : str =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(SCREAMING_SNAKE_CASE ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
_a : Union[str, Any] =validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1 )
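# Illustrative usage sketch (added; not part of the original file): outside of
# the tests, these criteria are normally passed to `generate`, e.g.
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)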
| 694 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
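    # Cheap sanity check (added): the first 3-digit Fibonacci number, 144, sits at index 12.
    assert fibonacci_digits_index(3) == 12
    # For the default n=1000, the expected answer (Project Euler #25) is 4782.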
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ) -> Union[str, Any]:
if isinstance(_UpperCAmelCase ,collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[int] ) -> str:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :str=None , **SCREAMING_SNAKE_CASE :List[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =VisionTextDualEncoderConfig.from_vision_text_configs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : List[str] =TFVisionTextDualEncoderModel(SCREAMING_SNAKE_CASE )
_a : List[Any] =model(input_ids=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[Any]=None , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[Any]:
'''simple docstring'''
_a , _a : str =self.get_vision_text_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Tuple =TFVisionTextDualEncoderModel(vision_model=SCREAMING_SNAKE_CASE , text_model=SCREAMING_SNAKE_CASE )
_a : Optional[Any] =model(input_ids=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any]=None , **SCREAMING_SNAKE_CASE :List[Any] ) -> Optional[Any]:
'''simple docstring'''
_a , _a : Any =self.get_vision_text_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Union[str, Any] ={"""vision_model""": vision_model, """text_model""": text_model}
_a : Union[str, Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**SCREAMING_SNAKE_CASE )
_a : List[Any] =model(input_ids=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[str]=None , **SCREAMING_SNAKE_CASE :int ) -> Union[str, Any]:
'''simple docstring'''
_a , _a : int =self.get_vision_text_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Dict =TFVisionTextDualEncoderModel(vision_model=SCREAMING_SNAKE_CASE , text_model=SCREAMING_SNAKE_CASE )
_a : Optional[int] =model(input_ids=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
_a : str =output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE )
_a : str =TFVisionTextDualEncoderModel.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =model(input_ids=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
_a : int =after_output[0].numpy()
_a : Union[str, Any] =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1e-5 )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :List[Any]=None , **SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
_a , _a : List[Any] =self.get_vision_text_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Tuple =TFVisionTextDualEncoderModel(vision_model=SCREAMING_SNAKE_CASE , text_model=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =model(
input_ids=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
_a : int =output.vision_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Tuple =to_atuple(vision_model.config.image_size )
_a : str =to_atuple(vision_model.config.patch_size )
_a : List[str] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_a : Dict =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_a : Tuple =output.text_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :float ) -> int:
'''simple docstring'''
_a : List[Any] =np.abs((a - b) ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f"Difference between torch and flax is {diff} (>= {tol})." )
def __UpperCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
_a : Dict =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : Union[str, Any] =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
_a : List[str] =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.prepare_config_and_inputs()
self.check_save_load(**SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : Tuple =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**SCREAMING_SNAKE_CASE )
@slow
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
_a , _a : Tuple =self.get_pretrained_model_and_inputs()
_a : Tuple =model_a(**SCREAMING_SNAKE_CASE )
_a : str =outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =TFVisionTextDualEncoderModel.from_pretrained(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =model_a(**SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =after_outputs[0].numpy()
_a : int =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1e-5 )
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
def __UpperCAmelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
_a : Tuple =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
_a : List[str] =1_3
_a : Optional[Any] =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_a : Any =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_a : int =random_attention_mask([batch_size, 4] )
_a : List[str] ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int ) -> Tuple:
'''simple docstring'''
_a : Optional[int] =TFViTModel(SCREAMING_SNAKE_CASE , name="""vision_model""" )
_a : List[str] =TFBertModel(SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
_a : Optional[Any] =TFViTModelTester(self )
_a : Any =TFBertModelTester(self )
_a : Optional[int] =vit_model_tester.prepare_config_and_inputs()
_a : List[Any] =bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_a : Union[str, Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
_a : Dict =1_3
_a : str =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_a : Tuple =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_a : Tuple =random_attention_mask([batch_size, 4] )
_a : int ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Dict=None , **SCREAMING_SNAKE_CASE :List[str] ) -> str:
'''simple docstring'''
_a , _a : Optional[Any] =self.get_vision_text_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[Any] =TFVisionTextDualEncoderModel(vision_model=SCREAMING_SNAKE_CASE , text_model=SCREAMING_SNAKE_CASE )
_a : List[Any] =model(
input_ids=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
_a : Any =output.vision_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_a : Optional[Any] =to_atuple(vision_model.config.image_size )
_a : Optional[int] =to_atuple(vision_model.config.patch_size )
_a : Dict =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_a : List[str] =num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_a : str =output.text_model_output.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str ) -> Union[str, Any]:
'''simple docstring'''
_a : List[Any] =TFDeiTModel(SCREAMING_SNAKE_CASE , name="""vision_model""" )
_a : Any =TFRobertaModel(SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def __UpperCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
_a : Union[str, Any] =TFDeiTModelTester(self )
_a : List[Any] =TFRobertaModelTester(self )
_a : List[str] =vit_model_tester.prepare_config_and_inputs()
_a : Dict =bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
def __UpperCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
_a : str =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
_a : List[str] =1_3
_a : Optional[Any] =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_a : List[Any] =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_a : Dict =random_attention_mask([batch_size, 4] )
_a : str ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] ) -> List[Any]:
'''simple docstring'''
_a : int =TFCLIPVisionModel(SCREAMING_SNAKE_CASE , name="""vision_model""" )
_a : Any =TFBertModel(SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def __UpperCAmelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =TFCLIPVisionModelTester(self )
_a : Any =TFBertModelTester(self )
_a : Union[str, Any] =clip_model_tester.prepare_config_and_inputs()
_a : List[Any] =bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
_a : Any =TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=SCREAMING_SNAKE_CASE )
_a : Tuple =VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
_a : List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_a : List[str] =processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="""np""" )
_a : Dict =model(**SCREAMING_SNAKE_CASE )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_a : str =np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 694 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
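# Example invocation (added for illustration; the script filename is assumed):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin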
| 694 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A__: int = None
A__: List[Any] = logging.get_logger(__name__)
A__: Tuple = '''▁'''
A__: Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A__: List[str] = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
A__: Tuple = {
'''google/pegasus-xsum''': 512,
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[str] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[Any] = PegasusTokenizer
__UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self :int , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :List[str]=None , SCREAMING_SNAKE_CASE :Union[str, Any]="<pad>" , SCREAMING_SNAKE_CASE :List[Any]="</s>" , SCREAMING_SNAKE_CASE :List[Any]="<unk>" , SCREAMING_SNAKE_CASE :Dict="<mask_2>" , SCREAMING_SNAKE_CASE :Any="<mask_1>" , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Union[str, Any]=1_0_3 , **SCREAMING_SNAKE_CASE :Dict , ) -> Tuple:
'''simple docstring'''
_a : List[str] =offset
if additional_special_tokens is not None:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError(
f"additional_special_tokens should be of type {type(SCREAMING_SNAKE_CASE )}, but is"
f" {type(SCREAMING_SNAKE_CASE )}" )
_a : List[Any] =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(SCREAMING_SNAKE_CASE ) , self.offset - 1 )
]
if len(set(SCREAMING_SNAKE_CASE ) ) != len(SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
_a : Optional[int] =additional_special_tokens_extended
else:
_a : int =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , mask_token_sent=SCREAMING_SNAKE_CASE , offset=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : Optional[Any] =False if not self.vocab_file else True
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
return [1 if x in all_special_ids else 0 for x in seq]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List , SCREAMING_SNAKE_CASE :Optional[List] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : str =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
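# Illustrative usage (added; not part of the original file), assuming this
# class is exported as PegasusTokenizerFast:
#   tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("The quick brown fox.").input_ids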
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
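# Illustrative usage (added; not part of the original file), assuming this
# class is exported as BartphoTokenizer:
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.").input_ids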
| 694 | 1 |
'''simple docstring'''
import numpy
class A__ :
def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :numpy.ndarray , SCREAMING_SNAKE_CASE :numpy.ndarray ) -> None:
'''simple docstring'''
_a : Dict =input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_a : Any =numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_a : Any =numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_a : int =numpy.random.rand(3 , 1 )
# Real output values provided.
_a : Union[str, Any] =output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_a : int =numpy.zeros(output_array.shape )
def __UpperCAmelCase ( self :int ) -> numpy.ndarray:
'''simple docstring'''
_a : int =sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_a : Tuple =sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_a : Optional[int] =sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __UpperCAmelCase ( self :Any ) -> None:
'''simple docstring'''
_a : List[str] =numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_a : Dict =numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_a : Tuple =numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :numpy.ndarray , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :bool ) -> None:
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
_a : Optional[int] =self.feedforward()
self.back_propagation()
if give_loss:
_a : Optional[Any] =numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"Iteration {iteration} Loss: {loss}" )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :numpy.ndarray ) -> int:
'''simple docstring'''
_a : str =input_arr
_a : Optional[Any] =sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_a : Dict =sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_a : str =sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : numpy.ndarray ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : numpy.ndarray ) -> numpy.ndarray:
return (value) * (1 - (value))
def SCREAMING_SNAKE_CASE_ ( ) -> int:
_a : Tuple =numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) ,dtype=numpy.floataa ,)
# True output values for the given input values.
_a : Tuple =numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) ,dtype=numpy.floataa )
# Calling neural network class.
_a : Union[str, Any] =TwoHiddenLayerNeuralNetwork(
input_array=_UpperCAmelCase ,output_array=_UpperCAmelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_UpperCAmelCase ,iterations=10 ,give_loss=_UpperCAmelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) ,dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 694 |
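# The back_propagation updates above apply the chain rule for a mean-squared-error
# loss through three sigmoid layers. A minimal standalone sketch of the same
# gradient for a single sigmoid layer (names here are illustrative, not from the class):
import numpy

def single_layer_gradient(inputs: numpy.ndarray, weights: numpy.ndarray, targets: numpy.ndarray) -> numpy.ndarray:
    outputs = 1 / (1 + numpy.exp(-numpy.dot(inputs, weights)))
    # 2 * (target - output) is the loss term; output * (1 - output) is the
    # sigmoid derivative, matching the sigmoid_derivative() helper above.
    delta = 2 * (targets - outputs) * outputs * (1 - outputs)
    return numpy.dot(inputs.T, delta)  # added to the weights, as in back_propagation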
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 1 |
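# Hedged usage sketch for the environment helpers re-exported above; exact
# signatures may differ across accelerate versions.
import os
from accelerate.utils import get_int_from_env, parse_flag_from_env

os.environ["MY_DEBUG_FLAG"] = "1"  # hypothetical variable name
print(parse_flag_from_env("MY_DEBUG_FLAG", default=False))  # True
print(get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1))  # first key found, else 1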
'''simple docstring'''
# flake8: noqa
# Lint as: python3
A__: int = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]:
_a : Dict =current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
_a : Any =row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
_a : Any =column
continue
_a : Union[str, Any] =column / magnitude
# Subtract to cancel term
_a : Optional[Any] =current_set[0]
_a : List[Any] =[first_row]
_a : Tuple =current_set[1::]
for row in current_set:
_a : Any =[]
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a : List[str] =final_set[0]
_a : Tuple =[]
_a : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a : str =simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,_UpperCAmelCase )
_a : List[Any] =resultant
return final_set
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list:
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_a : str =len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_a : str =equations.copy()
if any(0 in row for row in data_set ):
_a : Optional[int] =data_set.copy()
_a : str =[]
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
_a : List[Any] =data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 ,_UpperCAmelCase )
_a : Dict =data_set.copy()
_a : Any =simplify(_UpperCAmelCase )
_a : Any =simplified[::-1]
_a : list =[]
for row in simplified:
_a : Optional[Any] =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
_a : List[str] =temp_row[1::]
_a : int =temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
_a : Tuple =[]
for item in solutions:
final.append(float(round(_UpperCAmelCase ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A__: int = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 694 | 1 |
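# Independent cross-check of solve_simultaneous() above: each row is
# [a_1, ..., a_n, b] for the linear system A @ x = b, so numpy can solve it too.
import numpy as np

coeffs = np.array(eq, dtype=float)  # `eq` as defined in the __main__ block above
x = np.linalg.solve(coeffs[:, :-1], coeffs[:, -1])
print(x.round(5))  # matches solve_simultaneous(eq) up to rounding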
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :Callable , SCREAMING_SNAKE_CASE :Optional[Features] = None , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :Optional[dict] = None , SCREAMING_SNAKE_CASE :Optional[int] = None , **SCREAMING_SNAKE_CASE :int , ) -> List[Any]:
'''simple docstring'''
super().__init__(
features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE , streaming=SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : int =Generator(
cache_dir=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , gen_kwargs=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
# Build iterable dataset
if self.streaming:
_a : Optional[Any] =self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
_a : Tuple =None
_a : Tuple =None
_a : Any =None
_a : Tuple =None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE , download_mode=SCREAMING_SNAKE_CASE , verification_mode=SCREAMING_SNAKE_CASE , base_path=SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
_a : Any =self.builder.as_dataset(
split="""train""" , verification_mode=SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
| 694 |
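# Hedged usage sketch: the generator-backed reader above is what the public
# `datasets.Dataset.from_generator` entry point wraps.
from datasets import Dataset

def gen():
    for i in range(3):
        yield {"text": f"example {i}", "label": i % 2}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'text': 'example 0', 'label': 0}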
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
| 694 | 1 |
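# Minimal sketch of instantiating the configuration above, overriding one of
# the XPath-specific fields; other values follow the defaults in the signature.
from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=64)  # default max_depth is 50
print(config.hidden_size, config.max_depth)  # 768 64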
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str =np.full((len(_UpperCAmelCase ), sequence_length, 2) ,_UpperCAmelCase )
else:
_a : List[str] =np.full((len(_UpperCAmelCase ), sequence_length) ,_UpperCAmelCase )
for i, tensor in enumerate(_UpperCAmelCase ):
if padding_side == "right":
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str =tensor[:sequence_length]
else:
_a : Optional[Any] =tensor[:sequence_length]
else:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[Any] =tensor[:sequence_length]
else:
_a : Optional[int] =tensor[:sequence_length]
return out_tensor.tolist()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ) -> Optional[int]:
_a : Optional[int] =ord(_UpperCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
_a : Tuple =unicodedata.category(_UpperCAmelCase )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : int = -100
__UpperCamelCase : str = "pt"
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :int ) -> Any:
'''simple docstring'''
import torch
_a : Union[str, Any] ="""label""" if """label""" in features[0].keys() else """labels"""
_a : List[str] =[feature[label_name] for feature in features] if label_name in features[0].keys() else None
_a : Any =self.tokenizer.pad(
SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
_a : Optional[int] =torch.tensor(batch["""entity_ids"""] ).shape[1]
_a : Optional[int] =self.tokenizer.padding_side
if padding_side == "right":
_a : Any =[
list(SCREAMING_SNAKE_CASE ) + [self.label_pad_token_id] * (sequence_length - len(SCREAMING_SNAKE_CASE )) for label in labels
]
else:
_a : Tuple =[
[self.label_pad_token_id] * (sequence_length - len(SCREAMING_SNAKE_CASE )) + list(SCREAMING_SNAKE_CASE ) for label in labels
]
_a : Tuple =[feature["""ner_tags"""] for feature in features]
_a : Tuple =padding_tensor(SCREAMING_SNAKE_CASE , -1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =[feature["""original_entity_spans"""] for feature in features]
_a : int =padding_tensor(SCREAMING_SNAKE_CASE , (-1, -1) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : int ={k: torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 694 |
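# Self-contained sketch of the right-padding scheme padding_tensor() above is
# modeled on (-100 is the usual ignore index for loss masking; names are illustrative).
import numpy

def pad_right(sequences: list, pad_value: int, length: int) -> list:
    out = numpy.full((len(sequences), length), pad_value)
    for i, seq in enumerate(sequences):
        out[i, : len(seq)] = seq[:length]
    return out.tolist()

print(pad_right([[1, 2], [3]], -100, 4))
# [[1, 2, -100, -100], [3, -100, -100, -100]]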
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]:
if config_path is not None:
_a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase )
else:
_a : str =SpeechTaHifiGanConfig()
_a : Tuple =SpeechTaHifiGan(_UpperCAmelCase )
_a : int =torch.load(_UpperCAmelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =np.load(_UpperCAmelCase )
_a : Union[str, Any] =stats[0].reshape(-1 )
_a : Any =stats[1].reshape(-1 )
_a : Tuple =torch.from_numpy(_UpperCAmelCase ).float()
_a : List[str] =torch.from_numpy(_UpperCAmelCase ).float()
model.save_pretrained(_UpperCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__: Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 694 | 1 |
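# Why apply_weight_norm()/remove_weight_norm() bracket the weight copy above:
# weight-normalized convolutions store weight_g/weight_v parameters instead of
# a single weight tensor, matching the checkpoint keys being loaded.
from torch import nn

conv = nn.utils.weight_norm(nn.Conv1d(4, 4, kernel_size=3))
print(hasattr(conv, "weight_g"), hasattr(conv, "weight_v"))  # True True
nn.utils.remove_weight_norm(conv)  # re-fuses them into a plain .weight
print(hasattr(conv, "weight_g"))  # False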
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : str = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
        if isinstance(sources , int ):
            _a : Tuple =[sources]
        if isinstance(sinks , int ):
            _a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
A__: str = [0]
A__: Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__: List[str] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 694 | 0 |
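# Independent cross-check of the push-relabel result above, using a compact
# BFS-based Edmonds-Karp on the same adjacency-matrix graph.
from collections import deque

def edmonds_karp(capacity: list, source: int, sink: int) -> int:
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            break
        bottleneck, v = float("inf"), sink
        while v != source:  # find the path's residual bottleneck
            bottleneck = min(bottleneck, capacity[parent[v]][v] - flow[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:  # augment along the path
            flow[parent[v]][v] += bottleneck
            flow[v][parent[v]] -= bottleneck
            v = parent[v]
        max_flow += bottleneck
    return max_flow

print(edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))
# 6, matching the `maximum flow is 6` printed by the push-relabel run above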
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
    '''vocab_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
        ),
        '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
        ),
    },
}
__snake_case = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
__snake_case = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_INIT_CONFIGURATION
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = SqueezeBertTokenizer
def __init__( self: Dict,A_: int=None,A_: int=None,A_: List[Any]=True,A_: Any="[UNK]",A_: List[Any]="[SEP]",A_: int="[PAD]",A_: Optional[Any]="[CLS]",A_: List[str]="[MASK]",A_: int=True,A_: Dict=None,**A_: int,):
'''simple docstring'''
super().__init__(
A_,tokenizer_file=A_,do_lower_case=A_,unk_token=A_,sep_token=A_,pad_token=A_,cls_token=A_,mask_token=A_,tokenize_chinese_chars=A_,strip_accents=A_,**A_,)
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase',A_ ) != do_lower_case
or normalizer_state.get('strip_accents',A_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars',A_ ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(A_,normalizer_state.pop('type' ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**A_ )
__UpperCamelCase = do_lower_case
def snake_case_ ( self: Dict,A_: Tuple,A_: Union[str, Any]=None ):
'''simple docstring'''
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self: Any,A_: List[int],A_: Optional[List[int]] = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self: Union[str, Any],A_: str,A_: Optional[str] = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(A_,name=A_ )
return tuple(A_ )
| 1 |
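# Hedged usage sketch of the fast tokenizer above (downloads the vocab from the Hub).
from transformers import SqueezeBertTokenizerFast

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
encoded = tokenizer("Hello world", "second segment")
print(encoded["input_ids"])       # starts with [CLS], segments joined by [SEP]
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second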
'''simple docstring'''
A__: Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowerCamelCase__ ( _A):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] , __lowerCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCAmelCase : Optional[Features] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : bool = False , **__lowerCAmelCase : List[Any] , ) -> List[str]:
super().__init__(features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , **__lowerCAmelCase )
_A = Sql(
cache_dir=__lowerCAmelCase , features=__lowerCAmelCase , sql=__lowerCAmelCase , con=__lowerCAmelCase , **__lowerCAmelCase , )
def snake_case_ ( self : List[Any] ) -> Optional[int]:
_A = None
_A = None
_A = None
_A = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , )
# Build dataset for splits
_A = self.builder.as_dataset(
split='''train''' , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCAmelCase : Dataset , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Optional[int] , ) -> str:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
_A = dataset
_A = name
_A = con
_A = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_A = num_proc
_A = to_sql_kwargs
def snake_case_ ( self : str ) -> int:
_A = self.to_sql_kwargs.pop('''sql''' , __lowerCAmelCase )
_A = self.to_sql_kwargs.pop('''con''' , __lowerCAmelCase )
_A = self.to_sql_kwargs.pop('''index''' , __lowerCAmelCase )
_A = self._write(index=__lowerCAmelCase , **self.to_sql_kwargs )
return written
def snake_case_ ( self : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Dict:
_A , _A , _A = args
_A = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
_A = query_table(
table=self.dataset.data , key=slice(__lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
_A = batch.to_pandas()
_A = df.to_sql(self.name , self.con , index=__lowerCAmelCase , **__lowerCAmelCase )
return num_rows or len(__lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[int] ) -> int:
_A = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_A , _A = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __lowerCAmelCase , __lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
| 2 |
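# Hedged round-trip sketch of the reader/writer above via the public
# Dataset.to_sql / Dataset.from_sql entry points, on an in-memory SQLite DB.
import sqlite3
from datasets import Dataset

con = sqlite3.connect(":memory:")
Dataset.from_dict({"x": [1, 2, 3]}).to_sql("t", con)
ds = Dataset.from_sql("SELECT x FROM t", con)
print(ds["x"])  # [1, 2, 3]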
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 0 |
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 3 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A__ ( unittest.TestCase ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple:
'''simple docstring'''
_a : Optional[Any] =parent
_a : List[str] =batch_size
_a : List[str] =seq_length
_a : List[Any] =is_training
_a : Optional[int] =use_attention_mask
_a : List[Any] =use_token_type_ids
_a : List[Any] =use_labels
_a : Optional[Any] =vocab_size
_a : str =hidden_size
_a : List[Any] =num_hidden_layers
_a : List[Any] =num_attention_heads
_a : Union[str, Any] =intermediate_size
_a : int =hidden_act
_a : List[str] =hidden_dropout_prob
_a : Optional[int] =attention_probs_dropout_prob
_a : Dict =max_position_embeddings
_a : Any =type_vocab_size
_a : str =type_sequence_label_size
_a : str =initializer_range
_a : List[str] =num_choices
def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
_a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict =None
if self.use_attention_mask:
_a : Any =random_attention_mask([self.batch_size, self.seq_length] )
_a : Optional[int] =None
if self.use_token_type_ids:
_a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Union[str, Any] =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
_a : Tuple =self.prepare_config_and_inputs()
_a , _a , _a , _a : List[Any] =config_and_inputs
_a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : List[Any] =self.prepare_config_and_inputs()
_a , _a , _a , _a : Optional[int] =config_and_inputs
_a : Tuple =True
_a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCAmelCase ( self :str ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Dict =model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_flax
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Any ) -> str:
'''simple docstring'''
_a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Dict =model(SCREAMING_SNAKE_CASE )[0]
_a : List[Any] =[1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
_a : Any =np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self :int ) -> int:
'''simple docstring'''
_a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE )
_a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
_a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
_a : str =np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 694 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float , _UpperCAmelCase : float ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_UpperCAmelCase ) * abs(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 4 |
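# Quick numeric check of the formula above, KE = (1/2) * m * v**2:
# for m = 10 kg and v = -5 m/s the result is 125.0 J.
print(0.5 * 10 * abs(-5) * abs(-5))  # 125.0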
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A__: Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]:
return field(default_factory=lambda: default ,metadata=_UpperCAmelCase )
@dataclass
class A__ :
__UpperCamelCase : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__UpperCamelCase : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
__UpperCamelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
__UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__UpperCamelCase : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
__UpperCamelCase : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__UpperCamelCase : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__UpperCamelCase : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
__UpperCamelCase : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
__UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
__UpperCamelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
warnings.warn(
f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 694 | 0 |
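# Hedged sketch: benchmark-argument dataclasses like the one above are
# typically consumed through HfArgumentParser, which turns the fields into CLI flags.
from transformers import HfArgumentParser, PyTorchBenchmarkArguments

parser = HfArgumentParser(PyTorchBenchmarkArguments)
args = parser.parse_args_into_dataclasses(["--models", "bert-base-cased"])[0]
print(args.models)  # ['bert-base-cased']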
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase = logging.get_logger(__name__)
# TODO: upload to AWS
_lowercase = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : int = '''retribert'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=8 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=True , _lowercase=128 , _lowercase=0 , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = share_encoders
_lowerCAmelCase = projection_dim
| 5 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class A__ ( UpperCAmelCase__ ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]:
'''simple docstring'''
_a : int =1.0 if scale is None else scale
_a : Optional[Any] =0.0 if loc is None else loc
super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] )
@property
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def __UpperCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class A__ ( nn.Module ):
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Tuple =args_dim
_a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] )
_a : Dict =domain_map
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]:
'''simple docstring'''
_a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj]
return self.domain_map(*SCREAMING_SNAKE_CASE )
class A__ ( nn.Module ):
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int:
'''simple docstring'''
super().__init__()
_a : List[Any] =function
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]:
'''simple docstring'''
return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
class A__ :
__UpperCamelCase : type
__UpperCamelCase : int
__UpperCamelCase : Dict[str, int]
def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None:
'''simple docstring'''
_a : Any =dim
_a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim}
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution:
'''simple docstring'''
_a : str =self._base_distribution(SCREAMING_SNAKE_CASE )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim )
@property
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def __UpperCAmelCase ( self :Any ) -> float:
'''simple docstring'''
return 0.0
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
__UpperCamelCase : type = StudentT
@classmethod
def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]:
'''simple docstring'''
_a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
_a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1}
__UpperCamelCase : type = Normal
@classmethod
def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict:
'''simple docstring'''
_a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1}
__UpperCamelCase : type = NegativeBinomial
@classmethod
def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]:
'''simple docstring'''
_a : int =cls.squareplus(SCREAMING_SNAKE_CASE )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution:
'''simple docstring'''
_a , _a : Any =distr_args
if self.dim == 1:
return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution:
'''simple docstring'''
_a , _a : Optional[int] =distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 694 | 0 |
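# The squareplus map above, (x + sqrt(x^2 + 4)) / 2, is a smooth bijection from
# the reals to the strictly positive reals, used to keep scale/df/total_count
# parameters valid. Quick standalone check:
import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

print(squareplus(torch.tensor([-5.0, 0.0, 5.0])))  # ~[0.1926, 1.0000, 5.1926]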
import operator
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list , UpperCamelCase__: bool = False , UpperCamelCase__: list | None = None ):
SCREAMING_SNAKE_CASE__ = operator.lt if reverse else operator.gt
SCREAMING_SNAKE_CASE__ = solution or []
if not arr:
return solution
SCREAMING_SNAKE_CASE__ = [arr.pop(0 )]
for i, item in enumerate(UpperCamelCase__ ):
if _operator(UpperCamelCase__ , sublist[-1] ):
sublist.append(UpperCamelCase__ )
arr.pop(UpperCamelCase__ )
# merging sublist into solution list
if not solution:
solution.extend(UpperCamelCase__ )
else:
while sublist:
SCREAMING_SNAKE_CASE__ = sublist.pop(0 )
for i, xx in enumerate(UpperCamelCase__ ):
if not _operator(UpperCamelCase__ , UpperCamelCase__ ):
solution.insert(UpperCamelCase__ , UpperCamelCase__ )
break
else:
solution.append(UpperCamelCase__ )
strand_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 6 |
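# Strand sort above repeatedly peels off ascending "strands" and merges them
# into the solution; reverse-sorted input yields single-element strands, so the
# worst case is O(n^2). Duplicates are preserved, e.g.:
print(strand_sort([5, 1, 4, 2, 4, 1]))  # [1, 1, 2, 4, 4, 5]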
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number | (1 << position)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number & ~(1 << position)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return number ^ (1 << position)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool:
return ((number >> position) & 1) == 1
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int:
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
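# The five helpers above set, clear, flip, test, and get a bit, in that order.
# Standalone check of the same expressions for n = 0b1010 and position = 1:
n, pos = 0b1010, 1
print(bin(n | (1 << pos)))         # set   -> 0b1010 (bit already on)
print(bin(n & ~(1 << pos)))        # clear -> 0b1000
print(bin(n ^ (1 << pos)))         # flip  -> 0b1000
print(((n >> pos) & 1) == 1)       # test  -> True
print(int((n & (1 << pos)) != 0))  # get   -> 1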
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 7 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        """Expected a list of numbers as input, found """
                        F"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = F"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("""Missing an input""")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
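# Illustrative check (added): both variants agree on the L1 distance.
assert manhattan_distance([1, 1], [9, 9]) == 16.0
assert manhattan_distance_one_liner([1, 1], [9, 9]) == 16.0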
| 694 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : Union[str, Any] = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
lowercase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
from __future__ import annotations
class A__ :
    def __init__(self, order: int) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        '''simple docstring'''
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
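# Minimal sanity check (added): with a = [1, 0] and b = [1, 0] the 1st-order
# filter above is an identity system, so samples pass through unchanged.
_identity = A__(1)
_identity.set_coefficients([1.0, 0.0], [1.0, 0.0])
assert [_identity.process(x) for x in (0.0, 1.0, 0.5)] == [0.0, 1.0, 0.5]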
| 694 | 0 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
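# Quick sanity check (added): 2**10 = 1024, so modulo 1000 this is 24.
assert _modexpt(2, 10, 1000) == 24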
| 9 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph: dict, v: str, visited_forward: set, visited_backward: set, cst_fwd: dict, cst_bwd: dict, queue: PriorityQueue, parent: dict, shortest_distance: float | int) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
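# Illustrative query (added): the cheapest E -> F route in the sample graphs
# is E -> G -> F with total cost 3.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3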
| 694 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print('''**************''')
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print('''left rotation node:''', node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print('''right rotation node:''', node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print('''No such data''')
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print('''insert:''' + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print('''delete:''' + str(data))
        if self.root is None:
            print('''Tree is empty!''')
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ''''''
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = ''' ''' * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):  # trees deeper than 100 levels are not expected here
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 10 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
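# Known value (Project Euler 20): the digits of 100! sum to 648, i.e.
# solution(100) == 648.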
| 694 | 0 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowercase_ = get_logger(__name__)
class __A :
'''simple docstring'''
def __init__(self , A = None ) -> int:
"""simple docstring"""
_a = (
os.path.join(A , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_a = Extractor
def a__ (self , A ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_a = os.path.abspath(A )
return os.path.join(self.extract_dir , hash_url_to_filename(A ) )
def a__ (self , A , A ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(A ) and not (os.path.isdir(A ) and os.listdir(A ))
)
def a__ (self , A , A = False ) -> str:
"""simple docstring"""
_a = self.extractor.infer_extractor_format(A )
if not extractor_format:
return input_path
_a = self._get_output_path(A )
if self._do_extract(A , A ):
self.extractor.extract(A , A , A )
return output_path
class __A ( A ):
'''simple docstring'''
@classmethod
@abstractmethod
def a__ (cls , A , **A ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def a__ (A , A ) -> None:
"""simple docstring"""
...
class __A ( A , A ):
'''simple docstring'''
__lowerCamelCase : List[bytes] = []
@staticmethod
def a__ (A , A ) -> int:
"""simple docstring"""
with open(A , '''rb''' ) as f:
return f.read(A )
@classmethod
def a__ (cls , A , A = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
_a = max(len(A ) for cls_magic_number in cls.magic_numbers )
try:
_a = cls.read_magic_number(A , A )
except OSError:
return False
return any(magic_number.startswith(A ) for cls_magic_number in cls.magic_numbers )
class __A ( A ):
'''simple docstring'''
@classmethod
def a__ (cls , A , **A ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(A )
@staticmethod
def a__ (A , A ) -> List[Any]:
"""simple docstring"""
def resolved(A ) -> str:
return os.path.realpath(os.path.abspath(A ) )
def badpath(A , A ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(A , A ) ).startswith(A )
def badlink(A , A ) -> bool:
# Links are interpreted relative to the directory containing the link
_a = resolved(os.path.join(A , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=A )
_a = resolved(A )
for finfo in members:
if badpath(finfo.name , A ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(A , A ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(A , A ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
os.makedirs(A , exist_ok=A )
_a = tarfile.open(A )
tar_file.extractall(A , members=TarExtractor.safemembers(A , A ) )
tar_file.close()
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : int = [b'\x1F\x8B']
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
with gzip.open(A , '''rb''' ) as gzip_file:
with open(A , '''wb''' ) as extracted_file:
shutil.copyfileobj(A , A )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def a__ (cls , A , A = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(A , magic_number=A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(A , '''rb''' ) as fp:
_a = _EndRecData(A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_a = fp.read(A ) # CD is where we expect it to be
if len(A ) == sizeCentralDir:
_a = struct.unpack(A , A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
os.makedirs(A , exist_ok=A )
with zipfile.ZipFile(A , '''r''' ) as zip_file:
zip_file.extractall(A )
zip_file.close()
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
with lzma.open(A ) as compressed_file:
with open(A , '''wb''' ) as extracted_file:
shutil.copyfileobj(A , A )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Any = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(A , exist_ok=A )
_a = rarfile.RarFile(A )
rf.extractall(A )
rf.close()
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Any = [b'\x28\xb5\x2F\xFD']
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
_a = zstd.ZstdDecompressor()
with open(A , '''rb''' ) as ifh, open(A , '''wb''' ) as ofh:
dctx.copy_stream(A , A )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : List[str] = [b'\x42\x5A\x68']
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
        with bz2.open(A, '''rb''') as compressed_file:
with open(A , '''wb''' ) as extracted_file:
shutil.copyfileobj(A , A )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Tuple = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
        import py7zr
        os.makedirs(A, exist_ok=A)
        with py7zr.SevenZipFile(A, '''r''') as archive:
archive.extractall(A )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = [b'\x04\x22\x4D\x18']
@staticmethod
def a__ (A , A ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
        import lz4.frame
        with lz4.frame.open(A, '''rb''') as compressed_file:
with open(A , '''wb''' ) as extracted_file:
shutil.copyfileobj(A , A )
class __A :
'''simple docstring'''
__lowerCamelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def a__ (cls ) -> Tuple:
"""simple docstring"""
return max(
len(A )
for extractor in cls.extractors.values()
if issubclass(A , A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def a__ (A , A ) -> Tuple:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(A , magic_number_length=A )
except OSError:
return b""
@classmethod
def a__ (cls , A , A = False ) -> bool:
"""simple docstring"""
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=A , )
_a = cls.infer_extractor_format(A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def a__ (cls , A ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
_a = cls._get_magic_number_max_length()
_a = cls._read_magic_number(A , A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(A , magic_number=A ):
return extractor_format
@classmethod
def a__ (cls , A , A , A = None , A = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(A ) , exist_ok=A )
# Prevent parallel extractions
_a = str(Path(A ).with_suffix('''.lock''' ) )
with FileLock(A ):
shutil.rmtree(A , ignore_errors=A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(A , A ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=A , )
_a = extractor if extractor != '''deprecated''' else extractor_format
else:
_a = cls.extractors[extractor_format]
return extractor.extract(A , A )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(A ):
return extractor.extract(A , A )
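# Self-contained sketch (added) of the magic-number sniffing idea the
# extractor classes above rely on: read a short binary prefix and match it
# against known signatures. The constants mirror the `magic_numbers`
# attributes defined earlier in this file.
_MAGIC = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\xfd7zXZ\x00": "xz", b"BZh": "bz2"}

def _sniff_format(path):
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in _MAGIC))
    for magic, name in _MAGIC.items():
        if head.startswith(magic):
            return name
    return None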
| 11 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 0 |
from manim import *
class _snake_case ( UpperCAmelCase_ ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = Rectangle(height=0.5 , width=0.5)
lowercase__ : Tuple = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
lowercase__ : List[Any] = [mem.copy() for i in range(6)]
lowercase__ : Tuple = [mem.copy() for i in range(6)]
lowercase__ : List[str] = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Tuple = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : List[Any] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Dict = Text("""CPU""" , font_size=24)
lowercase__ : List[str] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
cpu.move_to([-2.5, -0.5, 0])
self.add(SCREAMING_SNAKE_CASE_)
lowercase__ : str = [mem.copy() for i in range(1)]
lowercase__ : Any = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Optional[int] = Text("""GPU""" , font_size=24)
lowercase__ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
gpu.set_x(gpu.get_x() - 1)
self.add(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = [mem.copy() for i in range(6)]
lowercase__ : str = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Optional[Any] = Text("""Model""" , font_size=24)
lowercase__ : Tuple = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
model.move_to([3, -1.0, 0])
self.play(
Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , )
lowercase__ : Optional[Any] = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
lowercase__ : Any = Square(side_length=2.2)
key.move_to([-5, 2, 0])
lowercase__ : Optional[int] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5) , Write(SCREAMING_SNAKE_CASE_) , Write(SCREAMING_SNAKE_CASE_))
self.add(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = []
lowercase__ : Union[str, Any] = []
lowercase__ : str = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7)
cpu_target.move_to(SCREAMING_SNAKE_CASE_)
cpu_target.generate_target()
lowercase__ : Union[str, Any] = 0.4_6 / 4
lowercase__ : Tuple = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE_)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0)
cpu_targs.append(SCREAMING_SNAKE_CASE_)
first_animations.append(rect.animate(run_time=0.5).set_stroke(SCREAMING_SNAKE_CASE_))
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5))
self.play(*SCREAMING_SNAKE_CASE_)
self.play(*SCREAMING_SNAKE_CASE_)
self.wait()
| 12 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A__ ( unittest.TestCase ):
    def __init__( self: List[str], parent, batch_size=7, num_channels=3, image_size=1_8, min_resolution=3_0, max_resolution=4_0_0, do_resize=True, size=None, do_normalize=True, ) -> Tuple:
        '''simple docstring'''
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """clusters"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 1_8, """width""": 1_8})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2)
        self.assertEqual(image_processor.size, {"""height""": 4_2, """width""": 4_2})
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """image_processor.json""")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""", split="""test""")
    image_a = Image.open(dataset[4]["""file"""])
    image_b = Image.open(dataset[5]["""file"""])
    return [image_a, image_b]
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1_0_2_4))
        expected_ids = [3_0_6, 1_9_1, 1_9_1]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1_0_2_4))
        expected_ids = [3_0_3, 1_3, 1_3]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 694 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
A__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCAmelCase_ (Pipeline ):
"""simple docstring"""
def __init__( self , **SCREAMING_SNAKE_CASE_ ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , image , **kwargs ) -> Optional[Any]:
        return super().__call__(image , **kwargs )
    def _sanitize_parameters( self , **kwargs ) -> Optional[Any]:
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ) -> Tuple:
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ) -> Dict:
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ) -> Optional[int]:
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
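# Hedged usage sketch (added): this class backs the high-level factory below.
# The checkpoint name is an assumption; any CLIP-style model should work.
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")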
| 13 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
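# Illustrative checks (added): 4 + 5 reaches 9; no subset of this array sums to 30.
assert SCREAMING_SNAKE_CASE_([3, 34, 4, 12, 5, 2], 9)
assert not SCREAMING_SNAKE_CASE_([3, 34, 4, 12, 5, 2], 30)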
| 694 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True,) -> Image.Image:
    """simple docstring"""
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
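# Known value (Project Euler 2): the even Fibonacci terms not exceeding four
# million sum to 4613732, so solution() == 4613732.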
| 694 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = MgpstrTokenizer
A__ = False
A__ = {}
A__ = False
def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
lowercase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + """\n""" )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Any ) -> str:
"""simple docstring"""
        input_text = """tester"""
        output_text = """tester"""
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                special_token = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def lowerCamelCase__ (self : Any ) -> List[str]:
"""simple docstring"""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(""" """ , """""" ) , output_text )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def lowerCamelCase__ (self : Optional[Any] ) -> str:
"""simple docstring"""
pass
| 15 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
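# Known value (Project Euler 25): F(4782) is the first Fibonacci number with
# 1000 digits, so solution(1000) == 4782.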
| 694 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__A : str = pd.read_csv('sample_data.csv', header=None)
__A : Union[str, Any] = df.shape[:1][0]
# If you're using some other dataset input the target column
__A : Union[str, Any] = df.iloc[:, 1:2]
__A : Any = actual_data.values.reshape(len_data, 1)
__A : List[Any] = MinMaxScaler().fit_transform(actual_data)
__A : List[Any] = 1_0
__A : Optional[Any] = 5
__A : Union[str, Any] = 2_0
__A : str = len_data - periods * look_back
__A : List[Any] = actual_data[:division]
__A : List[Any] = actual_data[division - look_back :]
__A , __A : str = [], []
__A , __A : Dict = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__A : Union[str, Any] = np.array(train_x)
__A : int = np.array(test_x)
__A : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
__A : str = np.array([list(i.ravel()) for i in test_y])
__A : Optional[Any] = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
__A : List[str] = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
__A : str = model.predict(x_test) | 16 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, rembert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("""Building PyTorch model from configuration: {}""".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
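# Typical invocation (added; paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt-best \
#       --rembert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin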
| 694 | 0 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
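# Illustrative check (added): the cheapest right/down path through
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 -> 3 -> 1 -> 1 -> 1 = 7.
assert __SCREAMING_SNAKE_CASE([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7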
| 17 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
"""Testing suite for the PyTorch DeiT model."""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
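# Illustrative driver (a sketch, not part of the original tests): the tester is
# normally exercised through DeiTModelTest below, but it can also be run standalone:
#
#     tester = DeiTModelTester(parent=unittest.TestCase())
#     config, pixel_values, labels = tester.prepare_config_and_inputs()
#     tester.create_and_check_model(config, pixel_values, labels)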
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher, which takes no labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# Image used by the integration tests below
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
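# Running these tests (a sketch; the path assumes the standard transformers repo
# layout, and RUN_SLOW=1 enables the tests marked @slow):
#
#     RUN_SLOW=1 pytest tests/models/deit/test_modeling_deit.py -k "IntegrationTest"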
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
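# The names above are re-exported for package consumers; a typical consumer-side
# sketch (assuming this file is accelerate's utils package init, which its contents
# suggest):
#
#     from accelerate.utils import patch_environment, set_seed
#
#     set_seed(42)
#     with patch_environment(CUDA_VISIBLE_DEVICES="0"):
#         ...  # code here only sees GPU 0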
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_a = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if isinstance(__snake_case, torch.Tensor ):
return image
elif isinstance(__snake_case, PIL.Image.Image ):
_UpperCamelCase = [image]
_UpperCamelCase = [trans(img.convert('''RGB''' ) ) for img in image]
_UpperCamelCase = torch.stack(__snake_case )
return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # add noise to the clean latents at the chosen timestep
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
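# A hedged usage sketch (the checkpoint name is an assumption; any unconditional
# UNet2DModel/DDIM checkpoint of matching resolution should work):
#
#     from diffusers import DDIMScheduler, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
#     scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256")
#     pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#     out = pipe(image=pil_image, strength=0.5, num_inference_steps=50)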
"""Solve a system of linear simultaneous equations by recursive Gaussian elimination."""


def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates 'unit' matrix
    duplicated_set = current_set.copy()
    for row_index, row in enumerate(duplicated_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude

    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)

    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, saved_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations, each given as a list of n coefficients
    followed by the constant term (length n + 1).
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # Move one equation with no zero coefficients to the front, if needed
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitution over the reduced (reversed) rows
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
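# Hand check (not in the original file): each equation above has the form
# sum(x) + x_i = 3 + i, so summing all five gives 6 * sum(x) = 30, i.e. sum(x) = 5
# and x_i = (3 + i) - 5. The expected output is therefore
# [-1.0, 0.0, 1.0, 2.0, 3.0], and [[4, 2]] reduces to 4x = 2, giving [0.5].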