Dataset schema (column, dtype, observed range):

    code                     string   lengths 82 – 53.2k
    code_codestyle           int64    0 – 721
    style_context            string   lengths 91 – 41.9k
    style_context_codestyle  int64    0 – 699
    label                    int64    0 – 1
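Assuming this preview corresponds to a dataset hosted on the Hugging Face Hub, the rows below can be loaded and inspected with the `datasets` library. A minimal sketch; the repository id `user/code-style-pairs` is a placeholder, not the real dataset path. In the rows previewed here, `label` is 1 exactly when the two codestyle ids agree:

from datasets import load_dataset

# Placeholder repository id -- substitute the dataset's actual Hub path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # obfuscated source sample
print(row["code_codestyle"])           # integer style id of `code`
print(row["style_context_codestyle"])  # integer style id of `style_context`
print(row["label"])                    # 1 when the two style ids match in this preview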
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
87
def heaps(arr: list) -> list:
    """
    Pure python implementation of Heap's algorithm (recursive version),
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
87
1
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
703
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
503
0
"""simple docstring""" from __future__ import annotations from math import gcd def _lowerCamelCase( a , a = 2 , a = 1 , a = 3 , ): # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError("The input value cannot be less than 2" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(a , a , a ) -> int: return (pow(lowercase_ , 2 ) + step) % modulus for _ in range(lowercase_ ): # These track the position within the cycle detection logic. __a = seed __a = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. __a = rand_fn(lowercase_ , lowercase_ , lowercase_ ) __a = rand_fn(lowercase_ , lowercase_ , lowercase_ ) __a = rand_fn(lowercase_ , lowercase_ , lowercase_ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. __a = gcd(hare - tortoise , lowercase_ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. __a = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse SCREAMING_SNAKE_CASE__:List[str] = argparse.ArgumentParser() parser.add_argument( """num""", type=int, help="""The value to find a divisor of""", ) parser.add_argument( """--attempts""", type=int, default=3, help="""The number of attempts before giving up""", ) SCREAMING_SNAKE_CASE__:Any = parser.parse_args() SCREAMING_SNAKE_CASE__:Optional[int] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F'''{args.num} is probably prime''') else: SCREAMING_SNAKE_CASE__:Union[str, Any] = args.num // divisor print(F'''{args.num} = {divisor} * {quotient}''')
528
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
114
0
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])

        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\n{mlfq.calculate_sequence_of_finish_queue()}")
603
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort the left half and right half individually, then merge them into input_list."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input list using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
603
1
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers import (
        TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
        BertConfig,
        DPRConfig,
        TFDPRContextEncoder,
        TFDPRQuestionEncoder,
        TFDPRReader,
    )


class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
107
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
18
0
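For reference, the DPR encoders exercised in this row's test file follow the standard transformers API. A minimal usage sketch, assuming the same checkpoint as the integration test above; `from_pt=True` mirrors that test and may be unnecessary if native TF weights are published:

from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", from_pt=True)

inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
embedding = model(**inputs).pooler_output  # shape (1, 768), as the test above checks slice values against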
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
702
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
94
0
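The TorchFormatter in this row is the backend behind datasets' torch formatting. A minimal sketch of the behaviour it implements; the default dtypes are the ones set in `_tensorize`:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})

# with_format("torch") routes row/column/batch access through the formatter,
# so numpy-backed values come back as torch tensors.
ds = ds.with_format("torch")
print(ds[0]["x"].dtype)  # torch.float32 -- floating point defaults to float32
print(ds[0]["y"].dtype)  # torch.int64  -- integers default to int64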
"""simple docstring""" import math import unittest def SCREAMING_SNAKE_CASE ( snake_case): assert isinstance(snake_case, snake_case) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(snake_case) + 1), 6): if number % i == 0 or number % (i + 2) == 0: return False return True class _A ( unittest.TestCase ): """simple docstring""" def lowercase ( self : Optional[int] ) -> Optional[Any]: self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def lowercase ( self : str ) -> Any: with self.assertRaises(a__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , ) self.assertFalse( is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
564
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer A_ : Optional[int] =logging.get_logger(__name__) A_ : Optional[int] ={"""vocab_file""": """vocab.txt"""} A_ : Union[str, Any] ={ """vocab_file""": { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""", } } A_ : Tuple ={ """YituTech/conv-bert-base""": 5_1_2, """YituTech/conv-bert-medium-small""": 5_1_2, """YituTech/conv-bert-small""": 5_1_2, } A_ : str ={ """YituTech/conv-bert-base""": {"""do_lower_case""": True}, """YituTech/conv-bert-medium-small""": {"""do_lower_case""": True}, """YituTech/conv-bert-small""": {"""do_lower_case""": True}, } class __a ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Tuple = ConvBertTokenizer def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ): super().__init__( a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , ) _lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , a__ ) != do_lower_case or normalizer_state.get('strip_accents' , a__ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , a__ ) != tokenize_chinese_chars ): _lowerCamelCase = getattr(a__ , normalizer_state.pop('type' ) ) _lowerCamelCase = do_lower_case _lowerCamelCase = strip_accents _lowerCamelCase = tokenize_chinese_chars _lowerCamelCase = normalizer_class(**a__ ) _lowerCamelCase = do_lower_case def snake_case_ ( self , a__ , a__=None ): _lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case_ ( self , a__ , a__ = None ): _lowerCamelCase = [self.sep_token_id] _lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case_ ( self , a__ , a__ = None ): _lowerCamelCase = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ )
650
0
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
702
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
260
0
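The Euler scheduler covered by this row's tests is normally swapped into a diffusers pipeline rather than stepped by hand. A minimal sketch, assuming a public Stable Diffusion checkpoint is available; any pipeline with a compatible scheduler config should work the same way:

from diffusers import DiffusionPipeline, EulerDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Rebuild the scheduler from the pipeline's existing scheduler config so the
# beta schedule and timestep settings carry over.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]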
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _snake_case = '''src/transformers''' _snake_case = '''docs/source/en''' _snake_case = '''.''' def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]: with open(snake_case__, "r", encoding="utf-8", newline="\n" ) as f: __UpperCAmelCase : str = f.readlines() # Find the start prompt. __UpperCAmelCase : Union[str, Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 __UpperCAmelCase : Optional[int] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _snake_case = '''Model|Encoder|Decoder|ForConditionalGeneration''' # Regexes that match TF/Flax/PT model names. _snake_case = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') _snake_case = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _snake_case = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # This is to make sure the transformers module imported is the one in the repo. _snake_case = direct_transformers_import(TRANSFORMERS_PATH) def _UpperCamelCase ( snake_case__ ) -> Union[str, Any]: __UpperCAmelCase : int = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", snake_case__ ) return [m.group(0 ) for m in matches] def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[str]: __UpperCAmelCase : Any = 2 if text == "✅" or text == "❌" else len(snake_case__ ) __UpperCAmelCase : Optional[Any] = (width - text_length) // 2 __UpperCAmelCase : Any = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _UpperCamelCase ( ) -> Union[str, Any]: __UpperCAmelCase : Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __UpperCAmelCase : Dict = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __UpperCAmelCase : Tuple = {name: config.replace("Config", "" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __UpperCAmelCase : int = collections.defaultdict(snake_case__ ) __UpperCAmelCase : str = collections.defaultdict(snake_case__ ) __UpperCAmelCase : str = collections.defaultdict(snake_case__ ) __UpperCAmelCase : Optional[int] = collections.defaultdict(snake_case__ ) __UpperCAmelCase : Optional[int] = collections.defaultdict(snake_case__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(snake_case__ ): __UpperCAmelCase : Dict = None if attr_name.endswith("Tokenizer" ): __UpperCAmelCase : Tuple = slow_tokenizers __UpperCAmelCase : Tuple = attr_name[:-9] elif attr_name.endswith("TokenizerFast" ): __UpperCAmelCase : Dict = fast_tokenizers __UpperCAmelCase : int = attr_name[:-13] elif _re_tf_models.match(snake_case__ ) is not None: __UpperCAmelCase : Tuple = tf_models __UpperCAmelCase : Optional[Any] = _re_tf_models.match(snake_case__ ).groups()[0] elif _re_flax_models.match(snake_case__ ) is not None: __UpperCAmelCase : List[Any] = flax_models __UpperCAmelCase : Optional[int] = _re_flax_models.match(snake_case__ ).groups()[0] elif _re_pt_models.match(snake_case__ ) is not None: __UpperCAmelCase : Any = pt_models __UpperCAmelCase : Dict = _re_pt_models.match(snake_case__ ).groups()[0] if lookup_dict is not None: while len(snake_case__ ) > 0: if attr_name in model_name_to_prefix.values(): __UpperCAmelCase : int = True break # Try again after removing the last word in the name __UpperCAmelCase : Dict = "".join(camel_case_split(snake_case__ )[:-1] ) # Let's build that table! __UpperCAmelCase : Optional[Any] = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __UpperCAmelCase : int = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). __UpperCAmelCase : Dict = [len(snake_case__ ) + 2 for c in columns] __UpperCAmelCase : Dict = max([len(snake_case__ ) for name in model_names] ) + 2 # Build the table per se __UpperCAmelCase : List[str] = "|" + "|".join([_center_text(snake_case__, snake_case__ ) for c, w in zip(snake_case__, snake_case__ )] ) + "|\n" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n" __UpperCAmelCase : Any = {True: "✅", False: "❌"} for name in model_names: __UpperCAmelCase : str = model_name_to_prefix[name] __UpperCAmelCase : List[Any] = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(snake_case__, snake_case__ ) for l, w in zip(snake_case__, snake_case__ )] ) + "|\n" return table def _UpperCamelCase ( snake_case__=False ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = _find_text_in_file( filename=os.path.join(snake_case__, "index.md" ), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", ) __UpperCAmelCase : List[Any] = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(snake_case__, "index.md" ), "w", encoding="utf-8", newline="\n" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _snake_case = parser.parse_args() check_model_table(args.fix_and_overwrite)
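Taken on its own, the CamelCase splitter that drives the prefix lookup above can be exercised like this (a standalone, runnable copy with de-obfuscated names): the lookaround regex cuts an identifier at lower-to-upper and acronym boundaries.

import re

def camel_case_split(identifier: str) -> list[str]:
    # lazy match up to a lower->upper boundary, an acronym boundary, or the end
    matches = re.finditer(
        r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier
    )
    return [m.group(0) for m in matches]

print(camel_case_split("XLMRobertaForSequenceClassification"))
# -> ['XLM', 'Roberta', 'For', 'Sequence', 'Classification']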
382
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets _snake_case = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _snake_case = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' _snake_case = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): def _lowerCamelCase ( self: Dict ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
            )

        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Value("string", id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        # transpose: one stream per reference index, which is what sacrebleu expects
        transformed_references = [
            [refs[i] for refs in references] for i in range(references_per_prediction)
        ]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
382
1
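A short, runnable sketch of the chrF++ computation this metric wraps, calling sacrebleu's CHRF directly on the sentence pairs from the docstring examples. Note the transposed reference layout described above: sacrebleu wants one list per reference stream, not one list per prediction.

from sacrebleu import CHRF

predictions = [
    "The relationship between cats and dogs is not exactly friendly.",
    "a good bookshop is just a genteel black hole that knows how to read.",
]
references = [
    ["The relationship between dogs and cats is not exactly friendly."],
    ["A good bookshop is just a genteel Black Hole that knows how to read."],
]

# transpose: one stream per reference index, as sacrebleu expects
transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]

chrf = CHRF(word_order=2)  # word_order=2 selects chrF++
print(chrf.corpus_score(predictions, transposed).score)  # ~82.87 per the docstring example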
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return the activation module matching ``act_fn``."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
708
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort in place with patience sort: deal elements onto piles found by
    binary search over pile tops, then heap-merge the piles back together."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
390
0
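As a quick illustration of the two primitives the patience sort above combines, bisect_left finds the leftmost pile whose top can accept the element, and heapq.merge lazily merges the sorted piles back together:

from bisect import bisect_left
from heapq import merge

tops = [3, 7, 12]                        # current pile tops (kept sorted)
print(bisect_left(tops, 5))              # -> 1: element 5 lands on the pile topped by 7
print(list(merge([1, 4], [2, 3], [0])))  # -> [0, 1, 2, 3, 4]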
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() UpperCAmelCase = 2 class UpperCAmelCase_ : def __init__( self : Union[str, Any] , *, # begin keyword-only arguments __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<pad>" , __UpperCamelCase : int="</s>" , __UpperCamelCase : Optional[Any]="<unk>" , __UpperCamelCase : Tuple=None , ) -> List[Any]: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = bos, unk, pad, eos _UpperCamelCase = [] _UpperCamelCase = [] _UpperCamelCase = {} _UpperCamelCase = self.add_symbol(__UpperCamelCase ) _UpperCamelCase = self.add_symbol(__UpperCamelCase ) _UpperCamelCase = self.add_symbol(__UpperCamelCase ) _UpperCamelCase = self.add_symbol(__UpperCamelCase ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(__UpperCamelCase ) _UpperCamelCase = len(self.symbols ) def __eq__( self : Optional[Any] , __UpperCamelCase : List[Any] ) -> Dict: return self.indices == other.indices def __getitem__( self : List[str] , __UpperCamelCase : int ) -> Tuple: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : Any ) -> str: return len(self.symbols ) def __contains__( self : str , __UpperCamelCase : List[Any] ) -> Any: return sym in self.indices @classmethod def _UpperCamelCase ( cls : Tuple , __UpperCamelCase : str ) -> str: _UpperCamelCase = cls() d.add_from_file(__UpperCamelCase ) return d def _UpperCamelCase ( self : str , __UpperCamelCase : int , __UpperCamelCase : Tuple=1 , __UpperCamelCase : str=False ) -> List[Any]: if word in self.indices and not overwrite: _UpperCamelCase = self.indices[word] _UpperCamelCase = self.count[idx] + n return idx else: _UpperCamelCase = len(self.symbols ) _UpperCamelCase = idx self.symbols.append(__UpperCamelCase ) self.count.append(__UpperCamelCase ) return idx def _UpperCamelCase ( self : str , __UpperCamelCase : Dict ) -> Optional[int]: return 0 def _UpperCamelCase ( self : int , __UpperCamelCase : Union[str, Any] ) -> Dict: if isinstance(__UpperCamelCase , __UpperCamelCase ): try: with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(__UpperCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(__UpperCamelCase ) ) return _UpperCamelCase = f.readlines() _UpperCamelCase = self._load_meta(__UpperCamelCase ) for line in lines[indices_start_line:]: try: _UpperCamelCase , _UpperCamelCase = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": _UpperCamelCase = True _UpperCamelCase , _UpperCamelCase = line.rsplit(''' ''' , 1 ) else: _UpperCamelCase = False _UpperCamelCase = int(__UpperCamelCase ) _UpperCamelCase = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. 
If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(__UpperCamelCase ) ) self.add_symbol(__UpperCamelCase , n=__UpperCamelCase , overwrite=__UpperCamelCase ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def lowercase ( a__ : List[Any] ) -> int: # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} _UpperCamelCase = dict((re.sub(R'''@@$''' , '''''' , a__ ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , a__ ), v) for k, v in d.items() ) _UpperCamelCase = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] _UpperCamelCase = d[k] # restore return da def lowercase ( a__ : int , a__ : int ) -> str: # prep if not os.path.exists(a__ ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(a__ , exist_ok=a__ ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models _UpperCamelCase = os.path.join(a__ , '''checkpoint.pt''' ) if not os.path.isfile(a__ ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) _UpperCamelCase = torch.load(a__ , map_location='''cpu''' ) _UpperCamelCase = chkpt['''cfg''']['''model'''] # dicts _UpperCamelCase = os.path.join(a__ , '''dict.txt''' ) if not os.path.isfile(a__ ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) _UpperCamelCase = Dictionary.load(a__ ) _UpperCamelCase = rewrite_dict_keys(src_dict.indices ) _UpperCamelCase = len(a__ ) _UpperCamelCase = os.path.join(a__ , VOCAB_FILES_NAMES['''vocab_file'''] ) print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(a__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) ) # merges_file (bpecodes) _UpperCamelCase = os.path.join(a__ , '''bpecodes''' ) if not os.path.isfile(a__ ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) _UpperCamelCase = os.path.join(a__ , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(a__ , a__ ) # model config _UpperCamelCase = os.path.join(a__ , '''config.json''' ) _UpperCamelCase = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.02, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1e-12, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(F'''Generating {biogpt_model_config_file}''' ) with open(a__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) ) # tokenizer config _UpperCamelCase = os.path.join(a__ , a__ ) _UpperCamelCase = 
{ '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 1024, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with open(a__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(a__ , ensure_ascii=a__ , indent=a__ ) ) # model _UpperCamelCase = chkpt['''model'''] # remove unneeded keys _UpperCamelCase = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(a__ , a__ ) _UpperCamelCase = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): _UpperCamelCase = model_state_dict.pop(a__ ) else: _UpperCamelCase = model_state_dict.pop(a__ ) _UpperCamelCase = BioGptConfig.from_pretrained(a__ ) _UpperCamelCase = BioGptForCausalLM(a__ ) # check that it loads ok model_new.load_state_dict(a__ ) # save _UpperCamelCase = os.path.join(a__ , a__ ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(a__ , a__ ) print('''Conversion is done!''' ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
420
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
420
1
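A worked example of the BPE-dictionary rewriting step in the conversion script above: word-internal "@@" markers are stripped, and unbroken words get a "</w>" end-of-word suffix. The dict comprehension below mirrors rewrite_dict_keys, minus the special-token handling.

import re

d = {"le@@": 5, "tt@@": 6, "er": 7}
rewritten = {
    (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
    for k, v in d.items()
}
print(rewritten)  # {'le': 5, 'tt': 6, 'er</w>': 7}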
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
622
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = XLMProphetNetTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = True def lowerCamelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''[PAD]''' __lowerCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''[PAD]''' ) self.assertEqual(vocab_keys[1] , '''[CLS]''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(__UpperCAmelCase ) , 1012 ) def lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) __lowerCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''[UNK]''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''[UNK]''', '''.''', ] , ) @cached_property def lowerCamelCase ( self ): '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' ) @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = '''Hello World!''' __lowerCamelCase = [35389, 6672, 49, 2] 
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def lowerCamelCase ( self ): '''simple docstring''' # fmt: off __lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
622
1
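Both __init__.py files above rely on transformers' lazy-import machinery. The toy stand-in below sketches the idea; it is not transformers' actual _LazyModule implementation, just a minimal illustration of deferring submodule imports until first attribute access.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only reached for names not yet bound: import the owning submodule now
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(module, attr)

# e.g. LazyModule("mypkg", {"modeling": ["BigModel"]}) defers importing
# mypkg.modeling (and its torch dependency) until BigModel is first accessed.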
import argparse
import hashlib  # only used for the sanity-check test below
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotate
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
66
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowerCAmelCase ( __UpperCamelCase ): def __init__( self : str , UpperCAmelCase : Dict , UpperCAmelCase : Dict=13 , UpperCAmelCase : List[Any]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : int=False , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int=99 , UpperCAmelCase : Dict=32 , UpperCAmelCase : Dict=5 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[Any]=64 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Union[str, Any]=0.0_2 , UpperCAmelCase : Dict=3 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : int=None , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Dict=1 , ) -> List[Any]: lowerCamelCase__ : Any = parent lowerCamelCase__ : Tuple = batch_size lowerCamelCase__ : Union[str, Any] = seq_length lowerCamelCase__ : Optional[Any] = is_training lowerCamelCase__ : Optional[Any] = use_input_mask lowerCamelCase__ : List[Any] = use_token_type_ids lowerCamelCase__ : str = use_labels lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Any = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : int = hidden_act lowerCamelCase__ : List[Any] = hidden_dropout_prob lowerCamelCase__ : List[str] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : Dict = type_vocab_size lowerCamelCase__ : Optional[int] = type_sequence_label_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : int = num_labels lowerCamelCase__ : Tuple = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : Dict = q_groups lowerCamelCase__ : Optional[Any] = k_groups lowerCamelCase__ : Any = v_groups lowerCamelCase__ : List[str] = post_attention_groups lowerCamelCase__ : Dict = intermediate_groups lowerCamelCase__ : Optional[int] = output_groups def A_ ( self : str ) -> str: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Tuple = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Union[str, Any] = None lowerCamelCase__ : Dict = None if self.use_labels: lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) 
lowerCamelCase__ : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self : Union[str, Any] ) -> List[Any]: return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def A_ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] ) -> List[str]: lowerCamelCase__ : List[Any] = SqueezeBertModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] ) -> Optional[Any]: lowerCamelCase__ : List[Any] = SqueezeBertForMaskedLM(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] ) -> Optional[Any]: lowerCamelCase__ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() lowerCamelCase__ : List[Any] = model( UpperCAmelCase , attention_mask=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Any ) -> Any: lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = SqueezeBertForSequenceClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ) -> List[str]: lowerCamelCase__ : str = self.num_labels lowerCamelCase__ : int = SqueezeBertForTokenClassification(config=UpperCAmelCase 
) model.to(UpperCAmelCase ) model.eval() lowerCamelCase__ : List[Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Tuple: lowerCamelCase__ : Optional[int] = self.num_choices lowerCamelCase__ : List[str] = SqueezeBertForMultipleChoice(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() lowerCamelCase__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : str = model( UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self : Optional[Any] ) -> Optional[int]: lowerCamelCase__ : Optional[Any] = self.prepare_config_and_inputs() ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Tuple = config_and_inputs lowerCamelCase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCAmelCase__ = ( { """feature-extraction""": SqueezeBertModel, """fill-mask""": SqueezeBertForMaskedLM, """question-answering""": SqueezeBertForQuestionAnswering, """text-classification""": SqueezeBertForSequenceClassification, """token-classification""": SqueezeBertForTokenClassification, """zero-shot""": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = True UpperCAmelCase__ = False def A_ ( self : Union[str, Any] ) -> Dict: lowerCamelCase__ : Optional[Any] = SqueezeBertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , dim=37 ) def A_ ( self : str ) -> str: self.config_tester.run_common_tests() def A_ ( self : str ) -> int: lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*UpperCAmelCase ) def A_ ( self : Union[str, Any] ) -> Tuple: lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCAmelCase ) def A_ ( self : Union[str, Any] ) -> Any: lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCAmelCase ) def A_ ( self : Union[str, Any] ) -> List[str]: lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCAmelCase ) def A_ ( self : Optional[int] ) -> Optional[Any]: lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCAmelCase ) def A_ ( self : Any ) -> Optional[Any]: lowerCamelCase__ : List[str] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCAmelCase ) @slow def A_ ( self : Optional[int] ) -> Dict: for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = SqueezeBertModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase ( unittest.TestCase ): @slow def A_ ( self : Optional[Any] ) -> List[str]: lowerCamelCase__ : Dict = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) lowerCamelCase__ : Optional[int] = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) lowerCamelCase__ : Optional[int] = model(UpperCAmelCase )[0] lowerCamelCase__ : Dict = torch.Size((1, 3) ) self.assertEqual(output.shape , UpperCAmelCase ) lowerCamelCase__ : List[str] = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] ) self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-4 ) )
295
0
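The heart of the SHA-1 round function above is the 32-bit left rotate; in isolation, it is just shifts, an OR, and a mask to stay within 32 bits:

def rotate_left(n: int, b: int) -> int:
    return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

assert rotate_left(0x80000000, 1) == 0x00000001  # the top bit wraps around
print(hex(rotate_left(0x12345678, 8)))           # -> 0x34567812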
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ) -> Optional[int]: '''simple docstring''' assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' ,[False, True] ) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ : Tuple = tmp_path / '''cache''' UpperCAmelCase_ : Tuple = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase_ : Optional[int] = TextDatasetReader(lowerCamelCase_ ,cache_dir=lowerCamelCase_ ,keep_in_memory=lowerCamelCase_ ).read() _check_text_dataset(lowerCamelCase_ ,lowerCamelCase_ ) @pytest.mark.parametrize( 'features' ,[ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] ,) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path / '''cache''' UpperCAmelCase_ : Tuple = {'''text''': '''string'''} UpperCAmelCase_ : Tuple = features.copy() if features else default_expected_features UpperCAmelCase_ : Optional[int] = ( Features({feature: Value(lowerCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase_ : List[Any] = TextDatasetReader(lowerCamelCase_ ,features=lowerCamelCase_ ,cache_dir=lowerCamelCase_ ).read() _check_text_dataset(lowerCamelCase_ ,lowerCamelCase_ ) @pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] ) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]: '''simple docstring''' UpperCAmelCase_ : Dict = tmp_path / '''cache''' UpperCAmelCase_ : List[Any] = {'''text''': '''string'''} UpperCAmelCase_ : List[Any] = TextDatasetReader(lowerCamelCase_ ,cache_dir=lowerCamelCase_ ,split=lowerCamelCase_ ).read() _check_text_dataset(lowerCamelCase_ ,lowerCamelCase_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' ,[str, list] ) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int: '''simple docstring''' if issubclass(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : List[Any] = text_path elif issubclass(lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase_ : str = [text_path] UpperCAmelCase_ : List[Any] = tmp_path / '''cache''' UpperCAmelCase_ : Optional[Any] = {'''text''': '''string'''} UpperCAmelCase_ : int = TextDatasetReader(lowerCamelCase_ ,cache_dir=lowerCamelCase_ ).read() _check_text_dataset(lowerCamelCase_ ,lowerCamelCase_ ) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=("train",) ) -> Dict: '''simple docstring''' assert isinstance(lowerCamelCase_ ,lowerCamelCase_ ) for split in splits: UpperCAmelCase_ : Optional[int] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype 
@pytest.mark.parametrize('keep_in_memory' ,[False, True] ) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Tuple: '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = tmp_path / '''cache''' UpperCAmelCase_ : Tuple = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase_ : Dict = TextDatasetReader({'train': text_path} ,cache_dir=lowerCamelCase_ ,keep_in_memory=lowerCamelCase_ ).read() _check_text_datasetdict(lowerCamelCase_ ,lowerCamelCase_ ) @pytest.mark.parametrize( 'features' ,[ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] ,) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ : str = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" UpperCAmelCase_ : str = {'''text''': '''string'''} UpperCAmelCase_ : List[str] = features.copy() if features else default_expected_features UpperCAmelCase_ : str = ( Features({feature: Value(lowerCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase_ : int = TextDatasetReader({'train': text_path} ,features=lowerCamelCase_ ,cache_dir=lowerCamelCase_ ).read() _check_text_datasetdict(lowerCamelCase_ ,lowerCamelCase_ ) @pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] ) def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int: '''simple docstring''' if split: UpperCAmelCase_ : Dict = {split: text_path} else: UpperCAmelCase_ : Optional[int] = '''train''' UpperCAmelCase_ : int = {'''train''': text_path, '''test''': text_path} UpperCAmelCase_ : Optional[Any] = tmp_path / '''cache''' UpperCAmelCase_ : str = {'''text''': '''string'''} UpperCAmelCase_ : int = TextDatasetReader(lowerCamelCase_ ,cache_dir=lowerCamelCase_ ).read() _check_text_datasetdict(lowerCamelCase_ ,lowerCamelCase_ ,splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
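The text-reader tests above assert a simple contract: a plain-text file loads as a single "text" column with one row per line. A quick sketch of the same behaviour through the public datasets API (the file name below is a hypothetical example, not a path from the tests):

from pathlib import Path

from datasets import load_dataset

Path("sample.txt").write_text("foo\nbar\nfoobar\nfoo bar\n")  # four lines -> four rows
ds = load_dataset("text", data_files={"train": "sample.txt"})["train"]
print(ds.num_rows, ds.column_names)  # 4 ['text']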
702
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig lowerCAmelCase__ = logging.get_logger(__name__) # General docstring lowerCAmelCase__ = "RegNetConfig" # Base docstring lowerCAmelCase__ = "facebook/regnet-y-040" lowerCAmelCase__ = [1, 1088, 7, 7] # Image classification docstring lowerCAmelCase__ = "facebook/regnet-y-040" lowerCAmelCase__ = "tabby, tabby cat" lowerCAmelCase__ = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowercase ( nn.Module ): def __init__( self , _snake_case , _snake_case , _snake_case = 3 , _snake_case = 1 , _snake_case = 1 , _snake_case = "relu" , ) -> int: super().__init__() UpperCAmelCase_ : str = nn.Convad( _snake_case , _snake_case , kernel_size=_snake_case , stride=_snake_case , padding=kernel_size // 2 , groups=_snake_case , bias=_snake_case , ) UpperCAmelCase_ : List[Any] = nn.BatchNormad(_snake_case) UpperCAmelCase_ : Tuple = ACTaFN[activation] if activation is not None else nn.Identity() def _snake_case ( self , _snake_case) -> Tuple: UpperCAmelCase_ : Optional[int] = self.convolution(_snake_case) UpperCAmelCase_ : int = self.normalization(_snake_case) UpperCAmelCase_ : Optional[int] = self.activation(_snake_case) return hidden_state class lowercase ( nn.Module ): def __init__( self , _snake_case) -> List[Any]: super().__init__() UpperCAmelCase_ : Tuple = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act) UpperCAmelCase_ : Optional[Any] = config.num_channels def _snake_case ( self , _snake_case) -> Dict: UpperCAmelCase_ : List[Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.') UpperCAmelCase_ : Any = self.embedder(_snake_case) return hidden_state class lowercase ( nn.Module ): def __init__( self , _snake_case , _snake_case , _snake_case = 2) -> Optional[Any]: super().__init__() UpperCAmelCase_ : Any = nn.Convad(_snake_case , _snake_case , kernel_size=1 , stride=_snake_case , bias=_snake_case) UpperCAmelCase_ : Optional[Any] = nn.BatchNormad(_snake_case) def _snake_case ( self , _snake_case) -> Tensor: UpperCAmelCase_ : Optional[Any] = self.convolution(_snake_case) UpperCAmelCase_ : Dict = self.normalization(_snake_case) return hidden_state class lowercase ( nn.Module ): def __init__( self , _snake_case , _snake_case) -> Any: super().__init__() UpperCAmelCase_ : Tuple = nn.AdaptiveAvgPoolad((1, 1)) UpperCAmelCase_ : int = nn.Sequential( nn.Convad(_snake_case , _snake_case , kernel_size=1) , nn.ReLU() , nn.Convad(_snake_case , _snake_case , kernel_size=1) , nn.Sigmoid() , ) def _snake_case ( self , _snake_case) -> Any: # b c h w -> b c 1 1 UpperCAmelCase_ : Union[str, Any] = self.pooler(_snake_case) UpperCAmelCase_ : Any = self.attention(_snake_case) UpperCAmelCase_ : Optional[Any] = hidden_state * attention return hidden_state class 
lowercase ( nn.Module ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 1) -> str: super().__init__() UpperCAmelCase_ : Optional[Any] = in_channels != out_channels or stride != 1 UpperCAmelCase_ : Any = max(1 , out_channels // config.groups_width) UpperCAmelCase_ : str = ( RegNetShortCut(_snake_case , _snake_case , stride=_snake_case) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase_ : Optional[int] = nn.Sequential( RegNetConvLayer(_snake_case , _snake_case , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(_snake_case , _snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act) , RegNetConvLayer(_snake_case , _snake_case , kernel_size=1 , activation=_snake_case) , ) UpperCAmelCase_ : int = ACTaFN[config.hidden_act] def _snake_case ( self , _snake_case) -> Union[str, Any]: UpperCAmelCase_ : str = hidden_state UpperCAmelCase_ : List[Any] = self.layer(_snake_case) UpperCAmelCase_ : Dict = self.shortcut(_snake_case) hidden_state += residual UpperCAmelCase_ : Any = self.activation(_snake_case) return hidden_state class lowercase ( nn.Module ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 1) -> int: super().__init__() UpperCAmelCase_ : List[Any] = in_channels != out_channels or stride != 1 UpperCAmelCase_ : Optional[Any] = max(1 , out_channels // config.groups_width) UpperCAmelCase_ : Optional[int] = ( RegNetShortCut(_snake_case , _snake_case , stride=_snake_case) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase_ : Optional[Any] = nn.Sequential( RegNetConvLayer(_snake_case , _snake_case , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(_snake_case , _snake_case , stride=_snake_case , groups=_snake_case , activation=config.hidden_act) , RegNetSELayer(_snake_case , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(_snake_case , _snake_case , kernel_size=1 , activation=_snake_case) , ) UpperCAmelCase_ : Any = ACTaFN[config.hidden_act] def _snake_case ( self , _snake_case) -> Union[str, Any]: UpperCAmelCase_ : List[str] = hidden_state UpperCAmelCase_ : int = self.layer(_snake_case) UpperCAmelCase_ : Any = self.shortcut(_snake_case) hidden_state += residual UpperCAmelCase_ : Any = self.activation(_snake_case) return hidden_state class lowercase ( nn.Module ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = 2 , _snake_case = 2 , ) -> Optional[int]: super().__init__() UpperCAmelCase_ : str = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer UpperCAmelCase_ : Dict = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _snake_case , _snake_case , _snake_case , stride=_snake_case , ) , *[layer(_snake_case , _snake_case , _snake_case) for _ in range(depth - 1)] , ) def _snake_case ( self , _snake_case) -> Dict: UpperCAmelCase_ : Optional[Any] = self.layers(_snake_case) return hidden_state class lowercase ( nn.Module ): def __init__( self , _snake_case) -> List[Any]: super().__init__() UpperCAmelCase_ : List[str] = nn.ModuleList([]) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , )) UpperCAmelCase_ : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(_snake_case , config.depths[1:]): 
self.stages.append(RegNetStage(_snake_case , _snake_case , _snake_case , depth=_snake_case)) def _snake_case ( self , _snake_case , _snake_case = False , _snake_case = True) -> BaseModelOutputWithNoAttention: UpperCAmelCase_ : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCAmelCase_ : int = hidden_states + (hidden_state,) UpperCAmelCase_ : Tuple = stage_module(_snake_case) if output_hidden_states: UpperCAmelCase_ : Union[str, Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case , hidden_states=_snake_case) class lowercase ( a_ ): _lowerCamelCase : str= RegNetConfig _lowerCamelCase : Optional[int]= "regnet" _lowerCamelCase : Union[str, Any]= "pixel_values" _lowerCamelCase : List[str]= True def _snake_case ( self , _snake_case) -> List[Any]: if isinstance(_snake_case , nn.Convad): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu') elif isinstance(_snake_case , (nn.BatchNormad, nn.GroupNorm)): nn.init.constant_(module.weight , 1) nn.init.constant_(module.bias , 0) def _snake_case ( self , _snake_case , _snake_case=False) -> List[Any]: if isinstance(_snake_case , _snake_case): UpperCAmelCase_ : str = value lowerCAmelCase__ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" lowerCAmelCase__ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top.", a_, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class lowercase ( a_ ): def __init__( self , _snake_case) -> List[str]: super().__init__(_snake_case) UpperCAmelCase_ : str = config UpperCAmelCase_ : Optional[int] = RegNetEmbeddings(_snake_case) UpperCAmelCase_ : int = RegNetEncoder(_snake_case) UpperCAmelCase_ : Any = nn.AdaptiveAvgPoolad((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_snake_case) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self , _snake_case , _snake_case = None , _snake_case = None) -> BaseModelOutputWithPoolingAndNoAttention: UpperCAmelCase_ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : int = self.embedder(_snake_case) UpperCAmelCase_ : str = self.encoder( _snake_case , output_hidden_states=_snake_case , return_dict=_snake_case) UpperCAmelCase_ : List[Any] = encoder_outputs[0] UpperCAmelCase_ : str = self.pooler(_snake_case) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_snake_case , pooler_output=_snake_case , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ", a_, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class lowercase ( a_ ): def __init__( self , _snake_case) -> Union[str, Any]: super().__init__(_snake_case) UpperCAmelCase_ : List[str] = config.num_labels UpperCAmelCase_ : Optional[int] = RegNetModel(_snake_case) # classification head UpperCAmelCase_ : Optional[Any] = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_snake_case) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , ) -> ImageClassifierOutputWithNoAttention: UpperCAmelCase_ : str = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : Optional[Any] = self.regnet(_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case) UpperCAmelCase_ : Dict = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase_ : int = self.classifier(_snake_case) UpperCAmelCase_ : List[Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase_ : Optional[int] = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase_ : Union[str, Any] = 'single_label_classification' else: UpperCAmelCase_ : Tuple = 'multi_label_classification' if self.config.problem_type == "regression": UpperCAmelCase_ : int = MSELoss() if self.num_labels == 1: UpperCAmelCase_ : Dict = loss_fct(logits.squeeze() , labels.squeeze()) else: UpperCAmelCase_ : Union[str, Any] = loss_fct(_snake_case , _snake_case) elif self.config.problem_type == "single_label_classification": UpperCAmelCase_ : Optional[int] = CrossEntropyLoss() UpperCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1)) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase_ : int = BCEWithLogitsLoss() UpperCAmelCase_ : Tuple = loss_fct(_snake_case , _snake_case) if not return_dict: UpperCAmelCase_ : Union[str, Any] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states)
471
0
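The RegNet Y-layer in the sample above differs from the X-layer only by the squeeze-and-excitation block wedged between the grouped convolution and the final 1x1 projection. (Identifiers such as nn.Convad, nn.BatchNormad, ACTaFN and nn.AdaptiveAvgPoolad in the sample are mangled forms of nn.Conv2d, nn.BatchNorm2d, ACT2FN and nn.AdaptiveAvgPool2d.) Below is a minimal standalone sketch of the squeeze-and-excitation mechanism in plain PyTorch, with a hypothetical class name; it is not the transformers helper the sample calls.

import torch
from torch import nn


class SqueezeExcite(nn.Module):
    """Channel attention: pool to 1x1, squeeze channels, expand back, rescale the input."""

    def __init__(self, channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        pooled = self.pooler(hidden_state)    # (B, C, 1, 1)
        attention = self.attention(pooled)    # per-channel weights in (0, 1)
        return hidden_state * attention       # broadcast rescale

# e.g. x = torch.randn(1, 64, 8, 8); SqueezeExcite(64, 16)(x).shape == x.shape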
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
8
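One detail worth isolating from the pipeline above: the requested audio length is rounded up to a multiple of the U-Net's total downsampling factor, and the surplus samples are trimmed after denoising. A self-contained sketch of that rule, with illustrative sample rates and no model required:

def round_up_sample_size(audio_length_in_s: float, sample_rate: int, num_up_blocks: int) -> tuple[int, int]:
    # The U-Net halves temporal resolution once per block, so the input length
    # must divide evenly by 2 ** num_up_blocks.
    down_scale_factor = 2 ** num_up_blocks
    sample_size = audio_length_in_s * sample_rate
    original_sample_size = int(sample_size)
    if sample_size % down_scale_factor != 0:
        sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
    return int(sample_size), original_sample_size

# round_up_sample_size(1.0, 16000, 4)    -> (16000, 16000)  already a multiple of 16
# round_up_sample_size(1.0001, 16000, 4) -> (16016, 16001)  rounded up, trimmed later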
def naive_cut_rod_recursive(n: int, prices: list) -> int:
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list) -> int:
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> int:
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> int:
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list) -> None:
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
649
0
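For context on the three strategies above: the naive recursion re-solves overlapping subproblems and is O(2^n), while both dynamic-programming variants fill an (n+1)-entry table with a double loop and are O(n^2). A small driver, assuming the functions above are in scope in the same module, makes the gap visible:

import timeit

prices = [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]  # classic CLRS price table

for fn in ("naive_cut_rod_recursive", "top_down_cut_rod", "bottom_up_cut_rod"):
    # Each call computes the best revenue for a rod of length 10.
    t = timeit.timeit(f"{fn}(10, prices)", globals=globals(), number=100)
    print(f"{fn}: {t:.4f}s for 100 runs")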
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer __a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name __a : Union[str, Any] = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class __UpperCAmelCase ( snake_case__ ): """simple docstring""" lowercase = 42 class __UpperCAmelCase ( snake_case__ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" super().__init__() self.register_modules( prior=SCREAMING_SNAKE_CASE , image_encoder=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , renderer=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" if latents is None: UpperCamelCase = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) UpperCamelCase = latents.to(SCREAMING_SNAKE_CASE ) UpperCamelCase = latents * scheduler.init_noise_sigma return latents def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 ) -> str: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCamelCase = torch.device(f'''cuda:{gpu_id}''' ) UpperCamelCase = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" if 
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(image[0] , torch.Tensor ): UpperCamelCase = torch.cat(SCREAMING_SNAKE_CASE , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE , axis=0 ) if not isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): UpperCamelCase = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values[0].unsqueeze(0 ) UpperCamelCase = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE ) UpperCamelCase = self.image_encoder(SCREAMING_SNAKE_CASE )["last_hidden_state"] UpperCamelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 UpperCamelCase = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: UpperCamelCase = torch.zeros_like(SCREAMING_SNAKE_CASE ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 25 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 4.0 , SCREAMING_SNAKE_CASE = 64 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ): UpperCamelCase = 1 elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): UpperCamelCase = image.shape[0] elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): UpperCamelCase = len(SCREAMING_SNAKE_CASE ) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE )}''' ) UpperCamelCase = self._execution_device UpperCamelCase = batch_size * num_images_per_prompt UpperCamelCase = guidance_scale > 1.0 UpperCamelCase = self._encode_image(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # prior self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE ) UpperCamelCase = self.scheduler.timesteps UpperCamelCase = self.prior.config.num_embeddings UpperCamelCase = self.prior.config.embedding_dim UpperCamelCase = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim UpperCamelCase = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCamelCase = self.prior( SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , proj_embedding=SCREAMING_SNAKE_CASE , ).predicted_image_embedding # remove the variance UpperCamelCase , UpperCamelCase = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: 
UpperCamelCase , UpperCamelCase = noise_pred.chunk(2 ) UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) UpperCamelCase = self.scheduler.step( SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , sample=SCREAMING_SNAKE_CASE , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE ) UpperCamelCase = [] for i, latent in enumerate(SCREAMING_SNAKE_CASE ): print() UpperCamelCase = self.renderer.decode( latent[None, :] , SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(SCREAMING_SNAKE_CASE ) UpperCamelCase = torch.stack(SCREAMING_SNAKE_CASE ) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) UpperCamelCase = images.cpu().numpy() if output_type == "pil": UpperCamelCase = [self.numpy_to_pil(SCREAMING_SNAKE_CASE ) for image in images] # Offload last model to CPU if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE )
414
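The guidance arithmetic inside the Shap-E denoising loop above is standard classifier-free guidance. One editorial note: the sample guards it with "do_classifier_free_guidance is not None", which is always true for a boolean; the intended test is the bare flag. The combine step in isolation, as a minimal sketch with made-up tensor shapes:

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The batch holds the unconditional predictions first and the conditional
    # ones second; guidance extrapolates from uncond toward cond.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

# e.g. apply_cfg(torch.randn(2, 1024, 64), guidance_scale=4.0).shape -> (1, 1024, 64)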
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
414
1
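A hedged usage sketch for the zero-shot audio classification pipeline above; the checkpoint id and file name are illustrative, and any CLAP-style model exposing logits_per_audio should behave the same way:

from transformers import pipeline

# "laion/clap-htsat-unfused" is one public CLAP checkpoint; swap in your own.
classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
result = classifier(
    "dog_bark.wav",  # hypothetical local file; URLs are accepted too
    candidate_labels=["dog barking", "vacuum cleaner", "rain"],
)
print(result)  # [{'score': ..., 'label': 'dog barking'}, ...] sorted by score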
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional @dataclass class _snake_case: __snake_case: Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} ) __snake_case: Optional[str] = field( default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} ) __snake_case: Optional[str] = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} ) __snake_case: Optional[str] = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) __snake_case: Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} ) __snake_case: Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} ) __snake_case: Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} ) __snake_case: Optional[int] = field( default=1_00_00 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} ) __snake_case: Optional[float] = field(default=2E-4 , metadata={'''help''': '''Learning rate fo training.'''} ) __snake_case: Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} ) __snake_case: Optional[int] = field( default=7_50 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} ) __snake_case: Optional[int] = field( default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} ) __snake_case: Optional[bool] = field( default=lowercase__ , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} ) __snake_case: Optional[int] = field(default=5_00_00 , metadata={'''help''': '''Maximum number of training steps.'''} ) __snake_case: Optional[int] = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} ) __snake_case: Optional[int] = field(default=10_24 , metadata={'''help''': '''Sequence lengths used for training.'''} ) __snake_case: Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} ) __snake_case: Optional[int] = field( default=10_24 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , ) __snake_case: Optional[str] = field( default=lowercase__ , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} ) __snake_case: Optional[bool] = field(default=lowercase__ , metadata={'''help''': '''If True the data is pretokenized.'''} ) @dataclass class _snake_case: __snake_case: Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) __snake_case: Optional[str] = field( default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} ) __snake_case: Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} ) __snake_case: Optional[int] = field( default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. 
If -1 the full dataset is evaluated.'''} ) __snake_case: Optional[int] = field(default=10_24 , metadata={'''help''': '''Length of sequences to be evaluated.'''} ) __snake_case: Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) @dataclass class _snake_case: __snake_case: Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} ) __snake_case: Optional[int] = field(default=lowercase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) __snake_case: Optional[int] = field( default=lowercase__ , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , ) __snake_case: Optional[bool] = field( default=lowercase__ , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} ) __snake_case: Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} ) __snake_case: Optional[int] = field(default=2_56 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} ) __snake_case: Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} ) __snake_case: Optional[float] = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} ) __snake_case: Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} ) __snake_case: Optional[int] = field( default=2_00 , metadata={'''help''': '''Number of completions to generate for each sample.'''} ) __snake_case: Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} ) __snake_case: Optional[str] = field( default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} ) __snake_case: Optional[str] = field( default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} ) __snake_case: Optional[int] = field( default=-1 , metadata={ '''help''': ( '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive''' ''' number corresponds to which GPU device id to run on.''' ) } , ) @dataclass class _snake_case: __snake_case: Optional[int] = field( default=lowercase__ , metadata={ '''help''': '''The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.''' } , ) __snake_case: Optional[str] = field( default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} ) __snake_case: Optional[str] = field( default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} ) __snake_case: Optional[int] = field( default=10_00_00 , metadata={'''help''': '''Number of files to save per JSON output file.'''} ) __snake_case: Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) __snake_case: Optional[float] = field( default=10_00 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} ) __snake_case: Optional[float] = field( default=1_00 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} ) __snake_case: Optional[float] = field( default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} ) __snake_case: Optional[float] = field( default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} ) __snake_case: Optional[float] = field( default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} ) __snake_case: Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , ) __snake_case: Optional[bool] = field( default=lowercase__ , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} ) __snake_case: Optional[float] = field( default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} ) @dataclass class _snake_case: __snake_case: Optional[str] = field( default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} ) __snake_case: Optional[str] = field( default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} ) __snake_case: Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} ) __snake_case: Optional[int] = field(default=20_00_00 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} ) __snake_case: Optional[int] = field( default=3_27_68 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} ) __snake_case: Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} ) __snake_case: Optional[bool] = field(default=lowercase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} ) @dataclass class _snake_case: __snake_case: Optional[str] = field( default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} ) __snake_case: Optional[str] = field( default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} ) __snake_case: Optional[str] = field( default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} ) __snake_case: Optional[int] = field(default=lowercase__ , metadata={'''help''': '''Number of workers used for code evaluation.'''} ) @dataclass class _snake_case: __snake_case: Optional[str] = field( default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} ) __snake_case: Optional[str] = field( 
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} ) __snake_case: Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} ) __snake_case: Optional[bool] = field(default=lowercase__ , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
531
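These dataclasses are meant to be consumed by transformers' HfArgumentParser, which turns every field into a CLI flag with the given default and help text. The class and field names below follow the upstream codeparrot arguments.py (they were flattened to placeholders in the copy above), so treat this as a sketch of intent rather than code runnable against the sample as printed:

from transformers import HfArgumentParser

# Assumed upstream names: the first dataclass is TrainingArguments, with fields
# such as model_ckpt, train_batch_size and learning_rate.
parser = HfArgumentParser(TrainingArguments)
(args,) = parser.parse_args_into_dataclasses()
print(args.model_ckpt, args.train_batch_size, args.learning_rate)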
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint a : List[str] = { "169M": 12, "430M": 24, "1B5": 24, "3B": 32, "7B": 32, "14B": 40, } a : Dict = { "169M": 7_68, "430M": 10_24, "1B5": 20_48, "3B": 25_60, "7B": 40_96, "14B": 51_20, } def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = list(state_dict.keys() ) for name in state_dict_keys: UpperCAmelCase : str = state_dict.pop(__magic_name__ ) # emb -> embedding if name.startswith("emb." ): UpperCAmelCase : str = name.replace("emb." , "embeddings." ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("blocks.0.ln0" ): UpperCAmelCase : int = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" ) # att -> attention UpperCAmelCase : Optional[int] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __magic_name__ ) # ffn -> feed_forward UpperCAmelCase : Tuple = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __magic_name__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(".time_mix_k" ): UpperCAmelCase : Optional[Any] = name.replace(".time_mix_k" , ".time_mix_key" ) # time_mix_v -> time_mix_value and reshape if name.endswith(".time_mix_v" ): UpperCAmelCase : List[str] = name.replace(".time_mix_v" , ".time_mix_value" ) # time_mix_r -> time_mix_key and reshape if name.endswith(".time_mix_r" ): UpperCAmelCase : List[Any] = name.replace(".time_mix_r" , ".time_mix_receptance" ) if name != "head.weight": UpperCAmelCase : List[str] = "rwkv." + name UpperCAmelCase : List[Any] = weight return state_dict def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=False , __magic_name__=None ): '''simple docstring''' if tokenizer_file is None: print("No `--tokenizer_file` provided, we will use the default tokenizer." ) UpperCAmelCase : List[str] = 5_0277 UpperCAmelCase : str = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" ) else: UpperCAmelCase : List[Any] = PreTrainedTokenizerFast(tokenizer_file=__magic_name__ ) UpperCAmelCase : List[Any] = len(__magic_name__ ) tokenizer.save_pretrained(__magic_name__ ) # 2. Build the config UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: UpperCAmelCase : Union[str, Any] = candidate break if size is None: raise ValueError("Could not infer the size, please provide it with the `--size` argument." ) if size not in possible_sizes: raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." ) UpperCAmelCase : str = RwkvConfig( vocab_size=__magic_name__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(__magic_name__ ) # 3. Download model file then convert state_dict UpperCAmelCase : Union[str, Any] = hf_hub_download(__magic_name__ , __magic_name__ ) UpperCAmelCase : Optional[Any] = torch.load(__magic_name__ , map_location="cpu" ) UpperCAmelCase : Union[str, Any] = convert_state_dict(__magic_name__ ) # 4. 
Split in shards and save UpperCAmelCase , UpperCAmelCase : Any = shard_checkpoint(__magic_name__ ) for shard_file, shard in shards.items(): torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) ) if index is not None: UpperCAmelCase : int = os.path.join(__magic_name__ , __magic_name__ ) # Save the index as well with open(__magic_name__ , "w" , encoding="utf-8" ) as f: UpperCAmelCase : List[Any] = json.dumps(__magic_name__ , indent=2 , sort_keys=__magic_name__ ) + "\n" f.write(__magic_name__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." ) UpperCAmelCase : Any = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: UpperCAmelCase : Dict = torch.load(os.path.join(__magic_name__ , __magic_name__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__magic_name__ , __magic_name__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("Please provide a `model_name` to push the model to the Hub." ) UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(__magic_name__ ) model.push_to_hub(__magic_name__ , max_shard_size="2GB" ) tokenizer.push_to_hub(__magic_name__ ) if __name__ == "__main__": a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint." ) parser.add_argument( "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo." ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="Where to save the converted model." ) parser.add_argument( "--tokenizer_file", default=None, type=str, help="Path to the tokenizer file to use (if not provided, only the model is converted).", ) parser.add_argument( "--size", default=None, type=str, help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Push to the Hub the converted model.", ) parser.add_argument( "--model_name", default=None, type=str, help="Name of the pushed model on the Hub, including the username / organization.", ) a : Dict = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
679
0
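The core of the RWKV conversion above is a mechanical state-dict key rewrite. A self-contained sketch of the same renaming rules applied to a toy key (no real checkpoint needed):

import re

def rename_rwkv_key(name: str) -> str:
    # emb -> embeddings, block-0 ln0 -> pre_ln, att -> attention, ffn -> feed_forward
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    if name.startswith("blocks.0.ln0"):
        name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    if name.endswith(".time_mix_k"):
        name = name.replace(".time_mix_k", ".time_mix_key")
    if name.endswith(".time_mix_v"):
        name = name.replace(".time_mix_v", ".time_mix_value")
    if name.endswith(".time_mix_r"):
        name = name.replace(".time_mix_r", ".time_mix_receptance")
    if name != "head.weight":
        name = "rwkv." + name
    return name

print(rename_rwkv_key("blocks.3.att.time_mix_k"))  # rwkv.blocks.3.attention.time_mix_key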
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
714
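A small usage example for the validator above (trees constructed here purely for illustration):

# A valid BST:   5        An invalid one: 3 sits in the right subtree of 5
#               / \
#              2   8
valid = TreeNode(5.0, TreeNode(2.0), TreeNode(8.0))
invalid = TreeNode(5.0, TreeNode(2.0), TreeNode(3.0))

assert is_binary_search_tree(valid) is True
assert is_binary_search_tree(invalid) is False  # 3 violates the (5, inf) bound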
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCAmelCase__ = parser.parse_args() if args.model_type == "bert": lowerCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name) lowerCAmelCase__ = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCAmelCase__ = model.state_dict() lowerCAmelCase__ = {} for w in ["word_embeddings", "position_embeddings"]: lowerCAmelCase__ = state_dict[F"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: lowerCAmelCase__ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"] lowerCAmelCase__ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] lowerCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 lowerCAmelCase__ = state_dict['cls.predictions.decoder.weight'] lowerCAmelCase__ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCAmelCase__ = state_dict[F"cls.predictions.transform.dense.{w}"] lowerCAmelCase__ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"] print(F"N layers selected for distillation: {std_idx}") print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(F"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
576
0
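The extraction script above initializes a 6-layer student from a 12-layer teacher by copying teacher layers [0, 2, 4, 7, 9, 11] into consecutive student slots. A sketch of just that index mapping; the key layout below is illustrative and does not claim to match the student checkpoint format the script actually writes:

# Map teacher layer indices to consecutive student indices:
# teacher 0 -> student 0, teacher 2 -> student 1, ..., teacher 11 -> student 5.
teacher_layers = [0, 2, 4, 7, 9, 11]

def student_key(teacher_key: str) -> str:
    # e.g. "bert.encoder.layer.7.attention.self.query.weight"
    #   -> "encoder.layer.3.attention.self.query.weight" in the student
    parts = teacher_key.split(".")
    layer_pos = parts.index("layer") + 1
    parts[layer_pos] = str(teacher_layers.index(int(parts[layer_pos])))
    return ".".join(parts[1:])  # drop the "bert." prefix

print(student_key("bert.encoder.layer.7.attention.self.query.weight"))
# encoder.layer.3.attention.self.query.weight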
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def snake_case__ ( lowercase , lowercase="shi-labs/oneformer_demo" ): with open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) as f: lowerCAmelCase_: List[Any] = json.load(lowercase ) lowerCAmelCase_: Any = {} lowerCAmelCase_: List[Any] = [] lowerCAmelCase_: Union[str, Any] = [] for key, info in class_info.items(): lowerCAmelCase_: Dict = info["name"] class_names.append(info["name"] ) if info["isthing"]: thing_ids.append(int(lowercase ) ) lowerCAmelCase_: Optional[Any] = thing_ids lowerCAmelCase_: Optional[int] = class_names return metadata class _lowercase ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=[0.5, 0.5, 0.5] , lowerCamelCase__=[0.5, 0.5, 0.5] , lowerCamelCase__=10 , lowerCamelCase__=False , lowerCamelCase__=255 , lowerCamelCase__="shi-labs/oneformer_demo" , lowerCamelCase__="ade20k_panoptic.json" , lowerCamelCase__=10 , ): lowerCAmelCase_: str = parent lowerCAmelCase_: Any = batch_size lowerCAmelCase_: str = num_channels lowerCAmelCase_: Dict = min_resolution lowerCAmelCase_: Tuple = max_resolution lowerCAmelCase_: Tuple = do_resize lowerCAmelCase_: Optional[Any] = {"shortest_edge": 32, "longest_edge": 1_333} if size is None else size lowerCAmelCase_: Optional[int] = do_normalize lowerCAmelCase_: List[str] = image_mean lowerCAmelCase_: Any = image_std lowerCAmelCase_: Optional[Any] = class_info_file lowerCAmelCase_: List[str] = prepare_metadata(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase_: Dict = num_text lowerCAmelCase_: Tuple = repo_path # for the post_process_functions lowerCAmelCase_: List[str] = 2 lowerCAmelCase_: Any = 10 lowerCAmelCase_: str = 10 lowerCAmelCase_: Any = 3 lowerCAmelCase_: List[Any] = 4 lowerCAmelCase_: List[Any] = num_labels lowerCAmelCase_: int = do_reduce_labels lowerCAmelCase_: List[Any] = ignore_index def _a ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def _a ( self , lowerCamelCase__ , lowerCamelCase__=False ): if not batched: lowerCAmelCase_: List[Any] = image_inputs[0] if isinstance(lowerCamelCase__ , Image.Image ): lowerCAmelCase_ , lowerCAmelCase_: Dict = image.size else: lowerCAmelCase_ , lowerCAmelCase_: Tuple = image.shape[1], image.shape[2] if w < h: lowerCAmelCase_: List[Any] = int(self.size["shortest_edge"] * h / w ) lowerCAmelCase_: List[Any] = self.size["shortest_edge"] elif w > h: lowerCAmelCase_: Union[str, Any] = 
self.size["shortest_edge"] lowerCAmelCase_: List[str] = int(self.size["shortest_edge"] * w / h ) else: lowerCAmelCase_: str = self.size["shortest_edge"] lowerCAmelCase_: int = self.size["shortest_edge"] else: lowerCAmelCase_: Dict = [] for image in image_inputs: lowerCAmelCase_ , lowerCAmelCase_: Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase_: List[Any] = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[0] )[0] lowerCAmelCase_: str = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[1] )[1] return expected_height, expected_width def _a ( self ): return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class _lowercase ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE: Dict = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string SCREAMING_SNAKE_CASE: Optional[int] = image_processing_class def _a ( self ): lowerCAmelCase_: Optional[int] = OneFormerImageProcessorTester(self ) @property def _a ( self ): return self.image_processing_tester.prepare_image_processor_dict() def _a ( self ): lowerCAmelCase_: Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "size" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "ignore_index" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "class_info_file" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "num_text" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "repo_path" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "metadata" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_reduce_labels" ) ) def _a ( self ): pass def _a ( self ): # Initialize image_processor lowerCAmelCase_: Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_: List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input lowerCAmelCase_: Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_: List[Any] = self.image_processing_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ , lowerCAmelCase_: Tuple = self.image_processing_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) lowerCAmelCase_: int = image_processor( lowerCamelCase__ , ["semantic"] * len(lowerCamelCase__ ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self ): # Initialize image_processor lowerCAmelCase_: Tuple = self.image_processing_class(**self.image_processor_dict ) 
# create random numpy tensors lowerCAmelCase_: List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input lowerCAmelCase_: List[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_: str = self.image_processing_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ , lowerCAmelCase_: Tuple = self.image_processing_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) lowerCAmelCase_: Any = image_processor( lowerCamelCase__ , ["semantic"] * len(lowerCamelCase__ ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self ): # Initialize image_processor lowerCAmelCase_: Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_: Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input lowerCAmelCase_: int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values lowerCAmelCase_ , lowerCAmelCase_: str = self.image_processing_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ , lowerCAmelCase_: List[str] = self.image_processing_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) lowerCAmelCase_: Tuple = image_processor( lowerCamelCase__ , ["semantic"] * len(lowerCamelCase__ ) , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__="np" ): lowerCAmelCase_: Optional[int] = self.image_processing_class(**self.image_processor_dict ) # prepare image and target lowerCAmelCase_: List[Any] = self.image_processing_tester.num_labels lowerCAmelCase_: int = None lowerCAmelCase_: Dict = None lowerCAmelCase_: List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase__ ) if with_segmentation_maps: lowerCAmelCase_: str = num_labels if is_instance_map: lowerCAmelCase_: int = list(range(lowerCamelCase__ ) ) * 2 lowerCAmelCase_: Dict = dict(enumerate(lowerCamelCase__ ) ) lowerCAmelCase_: Tuple = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": lowerCAmelCase_: Any = [Image.fromarray(lowerCamelCase__ ) for annotation in annotations] lowerCAmelCase_: List[str] = image_processor( lowerCamelCase__ , ["semantic"] * len(lowerCamelCase__ ) , lowerCamelCase__ , return_tensors="pt" , instance_id_to_semantic_id=lowerCamelCase__ , pad_and_return_pixel_mask=lowerCamelCase__ , ) return inputs def _a ( self ): pass def _a ( self ): def common(lowerCamelCase__=False , lowerCamelCase__=None 
): lowerCAmelCase_: Dict = self.comm_get_image_processor_inputs( with_segmentation_maps=lowerCamelCase__ , is_instance_map=lowerCamelCase__ , segmentation_type=lowerCamelCase__ ) lowerCAmelCase_: Any = inputs["mask_labels"] lowerCAmelCase_: Tuple = inputs["class_labels"] lowerCAmelCase_: Any = inputs["pixel_values"] lowerCAmelCase_: int = inputs["text_inputs"] # check the batch_size for mask_label, class_label, text_input in zip(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(lowerCamelCase__ ) , self.image_processing_tester.num_text ) common() common(is_instance_map=lowerCamelCase__ ) common(is_instance_map=lowerCamelCase__ , segmentation_type="pil" ) common(is_instance_map=lowerCamelCase__ , segmentation_type="pil" ) def _a ( self ): lowerCAmelCase_: Optional[Any] = np.zeros((20, 50) ) lowerCAmelCase_: Optional[Any] = 1 lowerCAmelCase_: int = 1 lowerCAmelCase_: Union[str, Any] = 1 lowerCAmelCase_: str = binary_mask_to_rle(lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def _a ( self ): lowerCAmelCase_: Any = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) lowerCAmelCase_: int = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase_: List[str] = fature_extractor.post_process_semantic_segmentation(lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) lowerCAmelCase_: Any = [(1, 4) for i in range(self.image_processing_tester.batch_size )] lowerCAmelCase_: Dict = fature_extractor.post_process_semantic_segmentation(lowerCamelCase__ , target_sizes=lowerCamelCase__ ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def _a ( self ): lowerCAmelCase_: Optional[Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) lowerCAmelCase_: Any = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase_: int = image_processor.post_process_instance_segmentation(lowerCamelCase__ , threshold=0 ) self.assertTrue(len(lowerCamelCase__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , lowerCamelCase__ ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def _a ( self ): lowerCAmelCase_: str = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , ) lowerCAmelCase_: Any = self.image_processing_tester.get_fake_oneformer_outputs() lowerCAmelCase_: List[Any] = image_processor.post_process_panoptic_segmentation(lowerCamelCase__ , 
threshold=0 ) self.assertTrue(len(lowerCamelCase__ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("segmentation" in el ) self.assertTrue("segments_info" in el ) self.assertEqual(type(el["segments_info"] ) , lowerCamelCase__ ) self.assertEqual( el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
613
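The rle assertions in the test above use the uncompressed run-length encoding of a flattened binary mask as (1-indexed start, run length) pairs. The assignment targets were stripped in this copy of the test, so the mask below is a reconstruction that reproduces the asserted values (rle[0] == 21, rle[1] == 45, len(rle) == 4); treat the exact indices as an assumption:

import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list[int]:
    # Uncompressed RLE: (1-indexed start, length) pairs for each run of 1s
    # over the row-major-flattened mask.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    changes = np.where(pixels[1:] != pixels[:-1])[0] + 1  # run boundaries
    changes[1::2] -= changes[::2]                          # turn run ends into lengths
    return list(changes)

mask = np.zeros((20, 50), dtype=np.uint8)
mask[0, 20:] = 1  # 30 ones at the end of row 0 ...
mask[1, :15] = 1  # ... merge with 15 ones at the start of row 1: one 45-run
mask[5, :10] = 1  # a second, separate run of 10 ones
print(binary_mask_to_rle_sketch(mask))  # [21, 45, 251, 10]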
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
613
1
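A short sanity trace for the three-pointer merge above, checkable by hand:

# The sequence merges the streams 2*u, 3*u, 5*u over the ugly numbers found so far:
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, ...
assert [ugly_numbers(k) for k in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
assert ugly_numbers(15) == 24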
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of all n-digit positive integers that are also an nth power.

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
337
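Why the default search bounds above suffice (this is Project Euler problem 63): 10^n always has n+1 digits, so no base of 10 or more can ever produce an n-digit nth power, and 9^22 already has only 21 digits, so powers of 22 and beyond are hopeless. A tiny check of both bounds; the full count the solution returns is 49:

assert len(str(9**21)) == 21  # 9^21 is still a 21-digit 21st power: a hit
assert len(str(9**22)) == 21  # one digit short of 22: no solutions past power 21
assert len(str(10**5)) == 6   # base 10 always overshoots n digits by one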
import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
337
1
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
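# --- Added example (hypothetical usage, built only on behaviour the tests above verify) ---
from transformers.tools.python_interpreter import evaluate

state = {"x": 3}
result = evaluate(
    "y = add_two(x)\ntest_list = [x, y]\ntest_list[1]",
    {"add_two": lambda v: v + 2},
    state=state,
)
assert result == 5      # value of the last expression
assert state["y"] == 5  # assignments persist in `state`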
5
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _lowercase = logging.get_logger(__name__) class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' _lowercase : List[str] = ['''input_values''', '''padding_mask'''] def __init__( self , _lowercase = 1 , _lowercase = 24_000 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , **_lowercase , ): """simple docstring""" super().__init__(feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , **_lowercase ) _lowerCAmelCase = chunk_length_s _lowerCAmelCase = overlap @property def _lowercase ( self ): """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _lowercase ( self ): """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , _lowercase , _lowercase = None , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs _lowerCAmelCase = True _lowerCAmelCase = bool( isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowerCAmelCase = [np.asarray(_lowercase , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_lowercase , np.ndarray ): _lowerCAmelCase = np.asarray(_lowercase , dtype=np.floataa ) elif isinstance(_lowercase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): _lowerCAmelCase = raw_audio.astype(np.floataa ) # always return batch if not is_batched: _lowerCAmelCase = [np.asarray(_lowercase ).T] # verify inputs are valid for idx, example in enumerate(_lowercase ): if example.ndim > 2: raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' ) _lowerCAmelCase = None _lowerCAmelCase = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: _lowerCAmelCase = min(array.shape[0] for array in raw_audio ) _lowerCAmelCase = int(np.floor(max_length / self.chunk_stride ) ) _lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: _lowerCAmelCase = max(array.shape[0] for array in raw_audio ) _lowerCAmelCase = int(np.ceil(max_length / self.chunk_stride ) ) _lowerCAmelCase = (nb_step - 1) * self.chunk_stride + self.chunk_length _lowerCAmelCase = """max_length""" else: _lowerCAmelCase = input_values # normal padding on batch if padded_inputs is None: _lowerCAmelCase = self.pad( _lowercase , max_length=_lowercase , truncation=_lowercase , padding=_lowercase , return_attention_mask=_lowercase , ) if padding: _lowerCAmelCase = padded_inputs.pop("""attention_mask""" ) _lowerCAmelCase = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: _lowerCAmelCase = example[..., None] input_values.append(example.T ) _lowerCAmelCase = input_values if return_tensors is not None: _lowerCAmelCase = padded_inputs.convert_to_tensors(_lowercase ) return padded_inputs
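# --- Added example (a minimal usage sketch; the constructor values are assumptions,
# not a pretrained configuration) ---
import numpy as np

from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
raw_audio = np.zeros(30_000, dtype=np.float32)  # 1.25 s of mono audio at 24 kHz
inputs = extractor(raw_audio, sampling_rate=24_000, padding=True, return_tensors="np")
print(inputs["input_values"].shape)  # (batch, channels, padded_num_samples)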
5
1
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class a__( unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : int = MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def a_ ( self): """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def a_ ( self): """simple docstring""" lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""") lowerCAmelCase = unmasker("""My name is <mask>""") self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=6) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-0_5, """token""": 38015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-0_5, """token""": 25506, """token_str""": """ accuser"""}, ] , ) lowerCAmelCase = unmasker("""The largest city in France is <mask>""") self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=6) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-0_5, """token""": 38015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-0_5, """token""": 25506, """token_str""": """ accuser""", }, ] , ) lowerCAmelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3) self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=6) , [ {"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 13606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-0_5, """token""": 3499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-0_5, """token""": 2941, """token_str""": """ Te"""}, ] , ) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""") lowerCAmelCase = unmasker("""My name is <mask>""") self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=6) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-0_5, """token""": 35676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-0_5, """token""": 16416, """token_str""": """ELS"""}, ] , ) lowerCAmelCase = unmasker("""The largest city in France is <mask>""") self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=6) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-0_5, """token""": 35676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-0_5, """token""": 16416, """token_str""": """ELS"""}, ] , ) lowerCAmelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3) self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=6) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-0_5, """token""": 3499, """token_str""": """ Patrick"""}, {"""sequence""": """My 
name is Te""", """score""": 2E-0_5, """token""": 2941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 13606, """token_str""": """ Clara"""}, ] , ) lowerCAmelCase = unmasker("""My name is <mask> <mask>""" , top_k=2) self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=6) , [ [ { """score""": 2.2E-0_5, """token""": 35676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-0_5, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-0_5, """token""": 35676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-0_5, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def a_ ( self): """simple docstring""" lowerCAmelCase = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""") # convert model to fp16 pipe.model.half() lowerCAmelCase = pipe("""Paris is the [MASK] of France.""") # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) @slow @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""") self.run_large_test(__lowerCAmelCase) @slow @require_tf def a_ ( self): """simple docstring""" lowerCAmelCase = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""") self.run_large_test(__lowerCAmelCase) def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = unmasker("""My name is <mask>""") self.assertEqual( nested_simplify(__lowerCAmelCase) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""}, ] , ) lowerCAmelCase = unmasker("""The largest city in France is <mask>""") self.assertEqual( nested_simplify(__lowerCAmelCase) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12790, """token_str""": """ Lyon""", }, ] , ) lowerCAmelCase = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3) self.assertEqual( nested_simplify(__lowerCAmelCase) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""}, ] , ) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""") lowerCAmelCase = None lowerCAmelCase = None self.run_pipeline_test(__lowerCAmelCase , []) @require_tf def a_ ( self): """simple docstring""" lowerCAmelCase = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""") lowerCAmelCase = None 
lowerCAmelCase = None self.run_pipeline_test(__lowerCAmelCase , []) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""") lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase) lowerCAmelCase = [ f"This is another {tokenizer.mask_token} test", ] return fill_masker, examples def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = fill_masker.tokenizer lowerCAmelCase = fill_masker.model lowerCAmelCase = fill_masker( f"This is a {tokenizer.mask_token}" , ) self.assertEqual( __lowerCAmelCase , [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ] , ) lowerCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}"]) self.assertEqual( __lowerCAmelCase , [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ] , ) lowerCAmelCase = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."]) self.assertEqual( __lowerCAmelCase , [ [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ], [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": 
ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ], ] , ) with self.assertRaises(__lowerCAmelCase): fill_masker([None]) # No mask_token is not supported with self.assertRaises(__lowerCAmelCase): fill_masker("""This is""") self.run_test_top_k(__lowerCAmelCase , __lowerCAmelCase) self.run_test_targets(__lowerCAmelCase , __lowerCAmelCase) self.run_test_top_k_targets(__lowerCAmelCase , __lowerCAmelCase) self.fill_mask_with_duplicate_targets_and_top_k(__lowerCAmelCase , __lowerCAmelCase) self.fill_mask_with_multiple_masks(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = tokenizer.get_vocab() lowerCAmelCase = sorted(vocab.keys())[:2] # Pipeline argument lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , targets=__lowerCAmelCase) lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( __lowerCAmelCase , [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ] , ) lowerCAmelCase = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase) lowerCAmelCase = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase)) # Call argument lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase) lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=__lowerCAmelCase) self.assertEqual( __lowerCAmelCase , [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ] , ) lowerCAmelCase = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase) lowerCAmelCase = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase)) # Score equivalence lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=__lowerCAmelCase) lowerCAmelCase = [top_mask["""token_str"""] for top_mask in outputs] lowerCAmelCase = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__lowerCAmelCase) == set(__lowerCAmelCase): lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=__lowerCAmelCase) lowerCAmelCase = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__lowerCAmelCase) , nested_simplify(__lowerCAmelCase)) # Raises with invalid with self.assertRaises(__lowerCAmelCase): lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[]) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__lowerCAmelCase): lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""""""]) with self.assertRaises(__lowerCAmelCase): lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , targets="""""") def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , top_k=2) lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( __lowerCAmelCase , [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ] , ) lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase) lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2) self.assertEqual( __lowerCAmelCase , [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ] , ) self.assertEqual(nested_simplify(__lowerCAmelCase) , nested_simplify(__lowerCAmelCase)) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = tokenizer.get_vocab() lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase) # top_k=2, ntargets=3 lowerCAmelCase = sorted(vocab.keys())[:3] lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=__lowerCAmelCase) # If we use the most probably targets, and filter differently, we should still # have the same results lowerCAmelCase = [el["""token_str"""] for el in sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase: x["score"] , reverse=__lowerCAmelCase)] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__lowerCAmelCase).issubset(__lowerCAmelCase): lowerCAmelCase = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=__lowerCAmelCase) # They should yield exactly the same result self.assertEqual(nested_simplify(__lowerCAmelCase) , nested_simplify(__lowerCAmelCase)) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase) lowerCAmelCase = tokenizer.get_vocab() # String duplicates + id duplicates lowerCAmelCase = sorted(vocab.keys())[:3] lowerCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]] lowerCAmelCase = fill_masker(f"My name is {tokenizer.mask_token}" , targets=__lowerCAmelCase , top_k=10) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__lowerCAmelCase) , 3) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase) lowerCAmelCase = fill_masker( f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2) self.assertEqual( __lowerCAmelCase , [ [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ], [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ], [ {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, {"""sequence""": ANY(__lowerCAmelCase), """score""": ANY(__lowerCAmelCase), """token""": ANY(__lowerCAmelCase), """token_str""": ANY(__lowerCAmelCase)}, ], ] , )
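# --- Added example (usage sketch of the pipeline exercised above) ---
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for prediction in unmasker("My name is <mask>"):
    # each prediction carries `sequence`, `score`, `token` and `token_str`
    print(prediction["token_str"], prediction["score"])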
710
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[str] = '''switch_transformers''' UpperCAmelCase_ : Tuple = ['''past_key_values'''] UpperCAmelCase_ : List[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , __lowerCAmelCase=32128 , __lowerCAmelCase=768 , __lowerCAmelCase=64 , __lowerCAmelCase=2048 , __lowerCAmelCase=64 , __lowerCAmelCase=12 , __lowerCAmelCase=3 , __lowerCAmelCase=12 , __lowerCAmelCase=3 , __lowerCAmelCase=12 , __lowerCAmelCase=8 , __lowerCAmelCase=False , __lowerCAmelCase=0.01 , __lowerCAmelCase="float32" , __lowerCAmelCase=False , __lowerCAmelCase=32 , __lowerCAmelCase=128 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=0.001 , __lowerCAmelCase=0.001 , __lowerCAmelCase=1.0 , __lowerCAmelCase="relu" , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0 , __lowerCAmelCase=1 , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = vocab_size lowerCAmelCase = d_model lowerCAmelCase = d_kv lowerCAmelCase = d_ff lowerCAmelCase = num_sparse_encoder_layers lowerCAmelCase = num_layers lowerCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase = num_heads lowerCAmelCase = num_experts lowerCAmelCase = expert_capacity lowerCAmelCase = router_bias lowerCAmelCase = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}") lowerCAmelCase = router_dtype lowerCAmelCase = router_ignore_padding_tokens lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = relative_attention_max_distance lowerCAmelCase = dropout_rate lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_factor lowerCAmelCase = feed_forward_proj lowerCAmelCase = use_cache lowerCAmelCase = add_router_probs lowerCAmelCase = router_z_loss_coef lowerCAmelCase = router_aux_loss_coef lowerCAmelCase = self.feed_forward_proj.split("""-""") lowerCAmelCase = act_info[-1] lowerCAmelCase = act_info[0] == """gated""" if len(__lowerCAmelCase) > 1 and act_info[0] != "gated" or len(__lowerCAmelCase) > 2: raise ValueError( f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer." """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
""" """'gated-gelu' or 'relu'""") # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase = """gelu_new""" super().__init__( pad_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase , )
605
0
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( a__ , unittest.TestCase ): __UpperCamelCase =LEDTokenizer __UpperCamelCase =LEDTokenizerFast __UpperCamelCase =True def UpperCamelCase ( self : Any ): """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] SCREAMING_SNAKE_CASE = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'} SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) ) def UpperCamelCase ( self : List[Any] , **snake_case__ : str ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( self : Dict , **snake_case__ : int ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] ): """simple docstring""" return "lower newer", "lower newer" @cached_property def UpperCamelCase ( self : List[str] ): """simple docstring""" return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def UpperCamelCase ( self : Dict ): """simple docstring""" return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] SCREAMING_SNAKE_CASE = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , max_length=len(SCREAMING_SNAKE_CASE_ ) , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @require_torch def UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) self.assertIn('input_ids' , SCREAMING_SNAKE_CASE_ ) self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ ) 
self.assertNotIn('labels' , SCREAMING_SNAKE_CASE_ ) self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE_ ) @require_torch def UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , max_length=3_2 , padding='max_length' , return_tensors='pt' ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) @require_torch def UpperCamelCase ( self : Dict ): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE = tokenizer( ['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) ) @require_torch def UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.'] SCREAMING_SNAKE_CASE = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE = inputs['input_ids'] SCREAMING_SNAKE_CASE = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: SCREAMING_SNAKE_CASE = ['Summary of the text.', 'Another summary.'] SCREAMING_SNAKE_CASE = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = [[0] * len(SCREAMING_SNAKE_CASE_ ) for x in encoded_output['input_ids']] SCREAMING_SNAKE_CASE = tokenizer.pad(SCREAMING_SNAKE_CASE_ ) self.assertSequenceEqual(outputs['global_attention_mask'] , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( self : Any ): """simple docstring""" pass def UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = 'A, <mask> AllenNLP sentence.' 
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
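# --- Added example (usage sketch mirroring the tokenizer tests above) ---
from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
batch = tokenizer(
    ["A long paragraph for summarization.", "Another paragraph for summarization."],
    padding=True,
    return_tensors="pt",
)
print(batch.input_ids.shape)       # (2, sequence_length)
print(batch.attention_mask.shape)  # same shape as input_ids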
439
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
0
# Algorithm for pigeonhole sorting
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is the difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in sorted order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
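# --- Added example ---
# Pigeonhole sort runs in O(n + k) time and O(k) extra space, where k is the
# value range (max - min + 1); it is only attractive when k is close to n.
data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
assert data == [2, 3, 4, 6, 7, 8, 8]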
416
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase_ ( datasets.BeamBasedBuilder ): def a_ ( self : Dict ) -> Dict: '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=UpperCAmelCase_ , ) def a_ ( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) -> Dict: '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )] def a_ ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Any: '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(UpperCAmelCase_ ) class lowerCAmelCase_ ( datasets.BeamBasedBuilder ): def a_ ( self : str ) -> Union[str, Any]: '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=UpperCAmelCase_ , ) def a_ ( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ) -> Optional[int]: '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} ) ] def a_ ( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) -> Dict: '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(UpperCAmelCase_ ) def _A ( ): return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] def _A ( ): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )] class lowerCAmelCase_ ( lowercase_ ): @require_beam def a_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: _UpperCAmelCase : Dict = DummyBeamDataset(cache_dir=UpperCAmelCase_ , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(UpperCAmelCase_ , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) _UpperCAmelCase : str = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , UpperCAmelCase_ ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , UpperCAmelCase_ ) self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(UpperCAmelCase_ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def a_ ( self : List[Any] ) -> str: '''simple docstring''' import apache_beam as beam _UpperCAmelCase : List[str] = beam.io.parquetio.WriteToParquet _UpperCAmelCase : Optional[Any] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: _UpperCAmelCase : Union[str, Any] = DummyBeamDataset(cache_dir=UpperCAmelCase_ , beam_runner='''DirectRunner''' ) with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock: _UpperCAmelCase : List[str] 
= partial(UpperCAmelCase_ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( UpperCAmelCase_ , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) ) self.assertTrue( os.path.exists( os.path.join( UpperCAmelCase_ , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) ) _UpperCAmelCase : Dict = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , UpperCAmelCase_ ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , UpperCAmelCase_ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) ) self.assertTrue( os.path.exists(os.path.join(UpperCAmelCase_ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset @require_beam def a_ ( self : str ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: _UpperCAmelCase : str = DummyBeamDataset(cache_dir=UpperCAmelCase_ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def a_ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: _UpperCAmelCase : Optional[Any] = NestedBeamDataset(cache_dir=UpperCAmelCase_ , beam_runner='''DirectRunner''' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(UpperCAmelCase_ , builder.name , '''default''' , '''0.0.0''' , F'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) ) _UpperCAmelCase : List[str] = builder.as_dataset() self.assertEqual(dset['''train'''].num_rows , UpperCAmelCase_ ) self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , UpperCAmelCase_ ) self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(UpperCAmelCase_ , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) ) del dset
416
1
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
624
from __future__ import annotations


def mean(nums: list[float]) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
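# --- Added example ---
# Quick usage check for the helper above, including the empty-list guard.
assert mean([3, 6, 9]) == 6.0
try:
    mean([])
except ValueError as err:
    print(err)  # List is empty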
39
0
import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class a ( __a ): lowercase_ : List[str] = '''encodec''' def __init__( self : List[Any] , snake_case__ : str=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , snake_case__ : int=24_000 , snake_case__ : Any=1 , snake_case__ : Optional[int]=False , snake_case__ : Tuple=None , snake_case__ : Optional[int]=None , snake_case__ : str=128 , snake_case__ : Tuple=32 , snake_case__ : Optional[int]=1 , snake_case__ : Dict=[8, 5, 4, 2] , snake_case__ : List[Any]="weight_norm" , snake_case__ : Union[str, Any]=7 , snake_case__ : Optional[int]=7 , snake_case__ : List[Any]=3 , snake_case__ : Optional[int]=2 , snake_case__ : List[str]=True , snake_case__ : str="reflect" , snake_case__ : int=2 , snake_case__ : Union[str, Any]=2 , snake_case__ : str=1.0 , snake_case__ : str=1_024 , snake_case__ : str=None , snake_case__ : List[Any]=True , **snake_case__ : Any , ): """simple docstring""" __lowerCAmelCase = target_bandwidths __lowerCAmelCase = sampling_rate __lowerCAmelCase = audio_channels __lowerCAmelCase = normalize __lowerCAmelCase = chunk_length_s __lowerCAmelCase = overlap __lowerCAmelCase = hidden_size __lowerCAmelCase = num_filters __lowerCAmelCase = num_residual_layers __lowerCAmelCase = upsampling_ratios __lowerCAmelCase = norm_type __lowerCAmelCase = kernel_size __lowerCAmelCase = last_kernel_size __lowerCAmelCase = residual_kernel_size __lowerCAmelCase = dilation_growth_rate __lowerCAmelCase = use_causal_conv __lowerCAmelCase = pad_mode __lowerCAmelCase = compress __lowerCAmelCase = num_lstm_layers __lowerCAmelCase = trim_right_ratio __lowerCAmelCase = codebook_size __lowerCAmelCase = codebook_dim if codebook_dim is not None else hidden_size __lowerCAmelCase = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( F"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**snake_case__ ) @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def UpperCAmelCase__ ( self : str ): """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __lowerCAmelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def UpperCAmelCase__ ( self : str ): """simple docstring""" return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
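# --- Added example (a usage sketch for the config above) ---
from transformers import EncodecConfig

config = EncodecConfig(sampling_rate=24_000, upsampling_ratios=[8, 5, 4, 2])
# hop length is the product of the upsampling ratios: 8 * 5 * 4 * 2 = 320
assert config.frame_rate == 75  # ceil(24_000 / 320) frames per second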
706
from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class a ( __UpperCAmelCase ): lowercase_ : Tuple = ['image_processor'] lowercase_ : Tuple = 'SamImageProcessor' def __init__( self : List[str] , snake_case__ : Optional[Any] ): """simple docstring""" super().__init__(snake_case__ ) __lowerCAmelCase = self.image_processor __lowerCAmelCase = -10 __lowerCAmelCase = self.image_processor.size["longest_edge"] def __call__( self : Any , snake_case__ : Optional[int]=None , snake_case__ : Optional[int]=None , snake_case__ : str=None , snake_case__ : List[str]=None , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Any , ): """simple docstring""" __lowerCAmelCase = self.image_processor( snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) # pop arguments that are not used in the foward but used nevertheless __lowerCAmelCase = encoding_image_processor["original_sizes"] if hasattr(snake_case__ , "numpy" ): # Checks if Torch or TF tensor __lowerCAmelCase = original_sizes.numpy() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points( input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , ) __lowerCAmelCase = self._normalize_and_convert( snake_case__ , snake_case__ , input_points=snake_case__ , input_labels=snake_case__ , input_boxes=snake_case__ , return_tensors=snake_case__ , ) return encoding_image_processor def UpperCAmelCase__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : str=None , snake_case__ : Any=None , snake_case__ : Any=None , snake_case__ : List[str]="pt" , ): """simple docstring""" if input_points is not None: if len(snake_case__ ) != len(snake_case__ ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] ) for point in input_points ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ ) for point, original_size in zip(snake_case__ , snake_case__ ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: __lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(snake_case__ , snake_case__ ) __lowerCAmelCase = np.array(snake_case__ ) if input_labels is not None: __lowerCAmelCase = np.array(snake_case__ ) if input_boxes is not None: if len(snake_case__ ) != len(snake_case__ ): __lowerCAmelCase = [ self._normalize_coordinates(self.target_size , snake_case__ , original_sizes[0] , is_bounding_box=snake_case__ ) for box in input_boxes ] else: __lowerCAmelCase = [ self._normalize_coordinates(self.target_size , snake_case__ , snake_case__ , is_bounding_box=snake_case__ ) for box, original_size in zip(snake_case__ , snake_case__ ) ] __lowerCAmelCase = np.array(snake_case__ ) if input_boxes is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(snake_case__ ) # boxes batch size of 1 by default __lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(snake_case__ ) # boxes batch size of 1 by default __lowerCAmelCase = tf.expand_dims(snake_case__ , 1 ) if 
len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"input_boxes": input_boxes} ) if input_points is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(snake_case__ ) # point batch size of 1 by default __lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(snake_case__ ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(snake_case__ , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"input_points": input_points} ) if input_labels is not None: if return_tensors == "pt": __lowerCAmelCase = torch.from_numpy(snake_case__ ) # point batch size of 1 by default __lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": __lowerCAmelCase = tf.convert_to_tensor(snake_case__ ) # point batch size of 1 by default __lowerCAmelCase = tf.expand_dims(snake_case__ , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"input_labels": input_labels} ) return encoding_image_processor def UpperCAmelCase__ ( self : str , snake_case__ : Union[str, Any] , snake_case__ : Dict ): """simple docstring""" __lowerCAmelCase = max([point.shape[0] for point in input_points] ) __lowerCAmelCase = [] for i, point in enumerate(snake_case__ ): if point.shape[0] != expected_nb_points: __lowerCAmelCase = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) __lowerCAmelCase = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(snake_case__ ) __lowerCAmelCase = processed_input_points return input_points, input_labels def UpperCAmelCase__ ( self : Dict , snake_case__ : int , snake_case__ : np.ndarray , snake_case__ : List[str] , snake_case__ : Dict=False ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = original_size __lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(snake_case__ , longest_edge=snake_case__ ) __lowerCAmelCase = deepcopy(snake_case__ ).astype(snake_case__ ) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1 , 2 , 2 ) __lowerCAmelCase = coords[..., 0] * (new_w / old_w) __lowerCAmelCase = coords[..., 1] * (new_h / old_h) if is_bounding_box: __lowerCAmelCase = coords.reshape(-1 , 4 ) return coords def UpperCAmelCase__ ( self : List[str] , snake_case__ : Union[str, Any]=None , snake_case__ : int=None , snake_case__ : Any=None , ): """simple docstring""" if input_points is not None: if hasattr(snake_case__ , "numpy" ): # Checks for TF or Torch tensor __lowerCAmelCase = input_points.numpy().tolist() if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_points[0] , snake_case__ ): raise ValueError("Input points must be a list of list of floating points." ) __lowerCAmelCase = [np.array(snake_case__ ) for input_point in input_points] else: __lowerCAmelCase = None if input_labels is not None: if hasattr(snake_case__ , "numpy" ): __lowerCAmelCase = input_labels.numpy().tolist() if not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_labels[0] , snake_case__ ): raise ValueError("Input labels must be a list of list integers." 
) __lowerCAmelCase = [np.array(snake_case__ ) for label in input_labels] else: __lowerCAmelCase = None if input_boxes is not None: if hasattr(snake_case__ , "numpy" ): __lowerCAmelCase = input_boxes.numpy().tolist() if ( not isinstance(snake_case__ , snake_case__ ) or not isinstance(input_boxes[0] , snake_case__ ) or not isinstance(input_boxes[0][0] , snake_case__ ) ): raise ValueError("Input boxes must be a list of list of list of floating points." ) __lowerCAmelCase = [np.array(snake_case__ ).astype(np.floataa ) for box in input_boxes] else: __lowerCAmelCase = None return input_points, input_labels, input_boxes @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(snake_case__ ) ) def UpperCAmelCase__ ( self : Optional[Any] , *snake_case__ : Dict , **snake_case__ : Union[str, Any] ): """simple docstring""" return self.image_processor.post_process_masks(*snake_case__ , **snake_case__ )
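# A minimal usage sketch for the processor above, assuming it corresponds to
# transformers' public SamProcessor; the checkpoint name, placeholder image and
# point coordinates are illustrative assumptions, not part of the original file.
import numpy as np
from PIL import Image
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # assumed checkpoint
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))   # placeholder image

# One prompt point per image; coordinates are rescaled to size["longest_edge"] internally.
inputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
print(inputs["input_points"].shape)  # (batch, point_batch, nb_points, 2)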
376
0
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
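# A standalone sketch of the scheduler exercised by the test above; the tensor
# shape and the constant stand-in for the model residual are assumptions.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # a real pipeline would use a model prediction here
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)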
693
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _snake_case , _snake_case , _snake_case : List[Any] = False, False, False @dataclass class __SCREAMING_SNAKE_CASE : SCREAMING_SNAKE_CASE__ =None SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =True SCREAMING_SNAKE_CASE__ =None # Automatically constructed SCREAMING_SNAKE_CASE__ ="dict" SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE ) def __call__( self ) -> Optional[int]: return self.pa_type def __lowerCAmelCase ( self, _a ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(_a, _a ): return {"bytes": None, "path": value} elif isinstance(_a, _a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __SCREAMING_SNAKE_CASE = BytesIO() sf.write(_a, value["array"], value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: __SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67 __SCREAMING_SNAKE_CASE = BytesIO(bytes() ) sf.write(_a, _a, value["sampling_rate"], format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def __lowerCAmelCase ( self, _a, _a = None ) -> dict: if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." 
) from err __SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: __SCREAMING_SNAKE_CASE = token_per_repo_id or {} __SCREAMING_SNAKE_CASE = path.split("::" )[-1] try: __SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"] __SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id] except (ValueError, KeyError): __SCREAMING_SNAKE_CASE = None with xopen(_a, "rb", use_auth_token=_a ) as f: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a ) __SCREAMING_SNAKE_CASE = array.T if self.mono: __SCREAMING_SNAKE_CASE = librosa.to_mono(_a ) if self.sampling_rate and self.sampling_rate != sampling_rate: __SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate ) __SCREAMING_SNAKE_CASE = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." ) return { "bytes": Value("binary" ), "path": Value("string" ), } def __lowerCAmelCase ( self, _a ) -> pa.StructArray: if pa.types.is_string(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): __SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("bytes" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: __SCREAMING_SNAKE_CASE = storage.field("path" ) else: __SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() ) return array_cast(_a, self.pa_type ) def __lowerCAmelCase ( self, _a ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(_a ): with xopen(_a, "rb" ) as f: __SCREAMING_SNAKE_CASE = f.read() return bytes_ __SCREAMING_SNAKE_CASE = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) __SCREAMING_SNAKE_CASE = pa.array( [os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), ) __SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], 
["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(_a, self.pa_type )
693
1
"""simple docstring""" def snake_case ( ) -> Tuple: _snake_case = 0 for i in range(1 , 1001 ): total += i**i return str(lowerCAmelCase_ )[-10:] if __name__ == "__main__": print(solution())
404
"""simple docstring""" def snake_case ( lowerCAmelCase_ = 1000 ) -> int: return sum(e for e in range(3 , lowerCAmelCase_ ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F"{solution() = }")
404
1
lowerCamelCase__ = """Alexander Joslin""" import operator as op from .stack import Stack def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ): """simple docstring""" __a = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub} __a = Stack() __a = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(_SCREAMING_SNAKE_CASE ) ) elif i in operators: # RULE 2 operator_stack.push(_SCREAMING_SNAKE_CASE ) elif i == ")": # RULE 4 __a = operator_stack.peek() operator_stack.pop() __a = operand_stack.peek() operand_stack.pop() __a = operand_stack.peek() operand_stack.pop() __a = operators[opr](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) operand_stack.push(_SCREAMING_SNAKE_CASE ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
225
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str):
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool):
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests():
    assert test_trie()


def main():
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
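# A quick interactive sketch of the TrieNode above.
root = TrieNode()
root.insert_many(["cat", "car", "card"])
print(root.find("car"))   # True
print(root.find("ca"))    # False: a prefix, not an inserted word
root.delete("car")
print(root.find("card"))  # True: deleting "car" leaves "card" intact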
225
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __A : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any , ) ->List[Any]: """simple docstring""" snake_case_ = parent snake_case_ = 13 snake_case_ = 7 snake_case_ = True snake_case_ = True snake_case_ = True snake_case_ = True snake_case_ = True snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = 2 snake_case_ = 99 snake_case_ = 0 snake_case_ = 32 snake_case_ = 2 snake_case_ = 4 snake_case_ = 0.1 snake_case_ = 0.1 snake_case_ = 512 snake_case_ = 16 snake_case_ = 2 snake_case_ = 0.02 snake_case_ = 3 snake_case_ = 4 snake_case_ = """last""" snake_case_ = True snake_case_ = None snake_case_ = 0 def lowerCAmelCase ( self : int ) ->Tuple: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) snake_case_ = None if self.use_input_lengths: snake_case_ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , ) ->Optional[Any]: """simple docstring""" snake_case_ = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE ) snake_case_ = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": 
token_type_ids} snake_case_ = model(_SCREAMING_SNAKE_CASE ) snake_case_ = [input_ids, input_mask] snake_case_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , ) ->Tuple: """simple docstring""" snake_case_ = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE ) snake_case_ = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} snake_case_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ) ->Tuple: """simple docstring""" snake_case_ = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE ) snake_case_ = {"""input_ids""": input_ids, """lengths""": input_lengths} snake_case_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , ) ->Union[str, Any]: """simple docstring""" snake_case_ = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE ) snake_case_ = {"""input_ids""": input_ids, """lengths""": input_lengths} snake_case_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , ) ->Tuple: """simple docstring""" snake_case_ = self.num_labels snake_case_ = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} snake_case_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , ) ->Any: """simple docstring""" snake_case_ = self.num_choices snake_case_ = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) snake_case_ = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) snake_case_ = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) snake_case_ = 
tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) snake_case_ = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } snake_case_ = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : int ) ->int: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": input_lengths, } return config, inputs_dict @require_tf class __A (snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: List[Any] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) __lowercase: Dict = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __lowercase: Union[str, Any] = ( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) __lowercase: Any = False __lowercase: Optional[Any] = False def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCAmelCase ( self : List[Any] ) ->Dict: """simple docstring""" snake_case_ = TFFlaubertModelTester(self ) snake_case_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 ) def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : List[Any] ) ->List[str]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : Any ) ->Any: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : List[Any] ) ->int: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*_SCREAMING_SNAKE_CASE ) def lowerCAmelCase ( self : List[str] ) ->str: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) @slow def lowerCAmelCase ( self : List[Any] ) ->Optional[int]: """simple docstring""" for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = TFFlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @require_tf @require_sentencepiece @require_tokenizers class __A (unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase ( self : Union[str, Any] ) ->Any: """simple docstring""" snake_case_ = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) snake_case_ = tf.convert_to_tensor( [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" snake_case_ = model(_SCREAMING_SNAKE_CASE )[0] snake_case_ = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. snake_case_ = tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
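# A hedged inference sketch mirroring the slow integration test above; the
# checkpoint, token ids, and expected shape are taken from that test.
import tensorflow as tf
from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]])  # "J'aime flaubert !"
output = model(input_ids)[0]
print(output.shape)  # (1, 8, 512)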
721
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __SCREAMING_SNAKE_CASE : Union[str, Any] = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __SCREAMING_SNAKE_CASE : Dict = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} __SCREAMING_SNAKE_CASE : Dict = 'zero2' __SCREAMING_SNAKE_CASE : List[Any] = 'zero3' __SCREAMING_SNAKE_CASE : int = [ZEROa, ZEROa] def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param snake_case_ = parameterized.to_safe_name("""_""".join(str(_SCREAMING_SNAKE_CASE ) for x in param.args ) ) return f"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test __SCREAMING_SNAKE_CASE : Dict = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __A (snake_case__): '''simple docstring''' @parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ ) def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ) ->Any: """simple docstring""" self.run_and_check( stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , ) @require_torch_multi_gpu @parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]: """simple docstring""" self.run_and_check( stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , ) @parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ ) def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->List[str]: """simple docstring""" self.run_and_check( stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , ) @require_torch_multi_gpu @parameterized.expand(UpperCAmelCase_ , name_func=UpperCAmelCase_ ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) ->Optional[int]: """simple docstring""" self.run_and_check( stage=UpperCAmelCase_ , model=UpperCAmelCase_ , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , ) def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]: """simple docstring""" pass def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , 
UpperCAmelCase_ : bool = True , ) ->List[str]: """simple docstring""" snake_case_ = models[model] snake_case_ = self.run_trainer( stage=UpperCAmelCase_ , model_name=UpperCAmelCase_ , eval_steps=UpperCAmelCase_ , num_train_epochs=1 , distributed=UpperCAmelCase_ , fpaa=UpperCAmelCase_ , ) self.do_checks(UpperCAmelCase_ ) return output_dir def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) ->List[str]: """simple docstring""" snake_case_ = self.get_auto_remove_tmp_dir("""./xxx""" , after=UpperCAmelCase_ ) snake_case_ = F""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(UpperCAmelCase_ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(["""--fp16"""] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files snake_case_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() snake_case_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] snake_case_ = self.get_launcher(UpperCAmelCase_ ) snake_case_ = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(UpperCAmelCase_ , env=self.get_env() ) return output_dir def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any=False ) ->Tuple: """simple docstring""" snake_case_ = min(2 , get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
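# A condensed sketch of the launcher construction used above: one process per
# GPU, capped at two in the distributed case (the test uses min(2, get_gpu_count())).
def sketch_get_launcher(distributed: bool = False) -> list:
    num_gpus = 2 if distributed else 1
    return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()


print(sketch_get_launcher(distributed=True))
# ['deepspeed', '--num_nodes', '1', '--num_gpus', '2']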
2
0
"""simple docstring""" from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO Update this lowerCAmelCase__ = { 'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json', # See all ESM models at https://huggingface.co/models?filter=esm } class _lowerCAmelCase ( __UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = 'esm' def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=7_6_8 , lowerCAmelCase_=1_2 , lowerCAmelCase_=1_2 , lowerCAmelCase_=3_0_7_2 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1_0_2_6 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1e-12 , lowerCAmelCase_="absolute" , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> List[str]: super().__init__(pad_token_id=lowerCAmelCase_ , mask_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE : str = vocab_size _SCREAMING_SNAKE_CASE : Any = hidden_size _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads _SCREAMING_SNAKE_CASE : List[Any] = intermediate_size _SCREAMING_SNAKE_CASE : int = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = initializer_range _SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps _SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type _SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache _SCREAMING_SNAKE_CASE : Optional[Any] = emb_layer_norm_before _SCREAMING_SNAKE_CASE : int = token_dropout _SCREAMING_SNAKE_CASE : Tuple = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.' ) _SCREAMING_SNAKE_CASE : int = EsmFoldConfig() elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _SCREAMING_SNAKE_CASE : Tuple = EsmFoldConfig(**lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE : Optional[int] = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' ) _SCREAMING_SNAKE_CASE : int = get_default_vocab_list() else: _SCREAMING_SNAKE_CASE : Optional[int] = vocab_list else: _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : Any = None if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , lowerCAmelCase_ ): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' 
) def A ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Optional[int] = super().to_dict() if isinstance(self.esmfold_config , lowerCAmelCase_ ): _SCREAMING_SNAKE_CASE : str = self.esmfold_config.to_dict() return output @dataclass class _lowerCAmelCase : SCREAMING_SNAKE_CASE_: str = None SCREAMING_SNAKE_CASE_: bool = True SCREAMING_SNAKE_CASE_: bool = False SCREAMING_SNAKE_CASE_: bool = False SCREAMING_SNAKE_CASE_: bool = False SCREAMING_SNAKE_CASE_: float = 0 SCREAMING_SNAKE_CASE_: bool = True SCREAMING_SNAKE_CASE_: bool = False SCREAMING_SNAKE_CASE_: int = 128 SCREAMING_SNAKE_CASE_: "TrunkConfig" = None def A ( self ) -> Any: if self.trunk is None: _SCREAMING_SNAKE_CASE : Tuple = TrunkConfig() elif isinstance(self.trunk , lowerCAmelCase_ ): _SCREAMING_SNAKE_CASE : str = TrunkConfig(**self.trunk ) def A ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : Union[str, Any] = asdict(self ) _SCREAMING_SNAKE_CASE : str = self.trunk.to_dict() return output @dataclass class _lowerCAmelCase : SCREAMING_SNAKE_CASE_: int = 48 SCREAMING_SNAKE_CASE_: int = 1024 SCREAMING_SNAKE_CASE_: int = 128 SCREAMING_SNAKE_CASE_: int = 32 SCREAMING_SNAKE_CASE_: int = 32 SCREAMING_SNAKE_CASE_: int = 32 SCREAMING_SNAKE_CASE_: float = 0 SCREAMING_SNAKE_CASE_: float = 0 SCREAMING_SNAKE_CASE_: bool = False SCREAMING_SNAKE_CASE_: int = 4 SCREAMING_SNAKE_CASE_: Optional[int] = 128 SCREAMING_SNAKE_CASE_: "StructureModuleConfig" = None def A ( self ) -> List[str]: if self.structure_module is None: _SCREAMING_SNAKE_CASE : str = StructureModuleConfig() elif isinstance(self.structure_module , lowerCAmelCase_ ): _SCREAMING_SNAKE_CASE : Tuple = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got' F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got' F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width _SCREAMING_SNAKE_CASE : Any = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got' F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got' F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def A ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = asdict(self ) _SCREAMING_SNAKE_CASE : List[str] = self.structure_module.to_dict() return output @dataclass class _lowerCAmelCase : SCREAMING_SNAKE_CASE_: int = 384 SCREAMING_SNAKE_CASE_: int = 128 SCREAMING_SNAKE_CASE_: int = 16 SCREAMING_SNAKE_CASE_: int = 128 SCREAMING_SNAKE_CASE_: int 
= 12 SCREAMING_SNAKE_CASE_: int = 4 SCREAMING_SNAKE_CASE_: int = 8 SCREAMING_SNAKE_CASE_: float = 0.1 SCREAMING_SNAKE_CASE_: int = 8 SCREAMING_SNAKE_CASE_: int = 1 SCREAMING_SNAKE_CASE_: int = 2 SCREAMING_SNAKE_CASE_: int = 7 SCREAMING_SNAKE_CASE_: int = 10 SCREAMING_SNAKE_CASE_: float = 1E-8 SCREAMING_SNAKE_CASE_: float = 1E5 def A ( self ) -> Optional[int]: return asdict(self ) def lowercase__ ( ): return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
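# A hedged usage sketch, assuming the config classes above correspond to
# transformers' public EsmConfig; the vocab_size value is an illustrative choice.
from transformers import EsmConfig

config = EsmConfig(vocab_size=33)
print(config.hidden_size)              # 768 by default in this file
print(config.position_embedding_type)  # "absolute"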
621
"""simple docstring""" import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') lowerCAmelCase__ = logging.getLogger(__name__) @dataclass class _lowerCAmelCase : SCREAMING_SNAKE_CASE_: Optional[int] = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) SCREAMING_SNAKE_CASE_: bool = field( default=__UpperCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) SCREAMING_SNAKE_CASE_: bool = field( default=__UpperCAmelCase , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) SCREAMING_SNAKE_CASE_: Optional[int] = field( default=__UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) SCREAMING_SNAKE_CASE_: Optional[int] = field( default=__UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) SCREAMING_SNAKE_CASE_: Optional[int] = field( default=__UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) @dataclass class _lowerCAmelCase : SCREAMING_SNAKE_CASE_: str = field( default=__UpperCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) SCREAMING_SNAKE_CASE_: str = field( default=__UpperCAmelCase , metadata={'help': 'Evaluation language. 
Also train language if `train_language` is set to None.'} ) SCREAMING_SNAKE_CASE_: Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'Train language if it is different from the evaluation language.'} ) SCREAMING_SNAKE_CASE_: Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) SCREAMING_SNAKE_CASE_: Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) SCREAMING_SNAKE_CASE_: Optional[str] = field( default=__UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) SCREAMING_SNAKE_CASE_: Optional[bool] = field( default=__UpperCAmelCase , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , ) SCREAMING_SNAKE_CASE_: bool = field( default=__UpperCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) SCREAMING_SNAKE_CASE_: str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) SCREAMING_SNAKE_CASE_: bool = field( default=__UpperCAmelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) SCREAMING_SNAKE_CASE_: bool = field( default=__UpperCAmelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def lowercase__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_xnli', lowerCamelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _SCREAMING_SNAKE_CASE : Any = training_args.get_process_log_level() logger.setLevel(lowerCamelCase ) datasets.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.set_verbosity(lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
_SCREAMING_SNAKE_CASE : Optional[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _SCREAMING_SNAKE_CASE : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: _SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset( 'xnli', model_args.language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: _SCREAMING_SNAKE_CASE : int = load_dataset( 'xnli', model_args.train_language, split='train', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) _SCREAMING_SNAKE_CASE : Dict = train_dataset.features['label'].names if training_args.do_eval: _SCREAMING_SNAKE_CASE : Any = load_dataset( 'xnli', model_args.language, split='validation', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) _SCREAMING_SNAKE_CASE : List[Any] = eval_dataset.features['label'].names if training_args.do_predict: _SCREAMING_SNAKE_CASE : Optional[int] = load_dataset( 'xnli', model_args.language, split='test', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) _SCREAMING_SNAKE_CASE : int = predict_dataset.features['label'].names # Labels _SCREAMING_SNAKE_CASE : List[Any] = len(lowerCamelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=lowerCamelCase, idalabel={str(lowerCamelCase ): label for i, label in enumerate(lowerCamelCase )}, labelaid={label: i for i, label in enumerate(lowerCamelCase )}, finetuning_task='xnli', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=lowerCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: _SCREAMING_SNAKE_CASE : Tuple = 'max_length' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _SCREAMING_SNAKE_CASE : Tuple = False def preprocess_function(lowerCamelCase ): # Tokenize the texts return tokenizer( examples['premise'], examples['hypothesis'], padding=lowerCamelCase, max_length=data_args.max_seq_length, truncation=lowerCamelCase, ) if training_args.do_train: if data_args.max_train_samples is not None: _SCREAMING_SNAKE_CASE : List[Any] = min(len(lowerCamelCase ), data_args.max_train_samples ) _SCREAMING_SNAKE_CASE : Tuple = train_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): _SCREAMING_SNAKE_CASE : Tuple = train_dataset.map( lowerCamelCase, batched=lowerCamelCase, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on train dataset', ) # Log a few random samples from the training set: for index in random.sample(range(len(lowerCamelCase ) ), 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) if training_args.do_eval: if data_args.max_eval_samples is not None: _SCREAMING_SNAKE_CASE : Dict = min(len(lowerCamelCase ), data_args.max_eval_samples ) _SCREAMING_SNAKE_CASE : Optional[Any] = eval_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): _SCREAMING_SNAKE_CASE : Tuple = eval_dataset.map( lowerCamelCase, batched=lowerCamelCase, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on validation dataset', ) if training_args.do_predict: if data_args.max_predict_samples is not None: _SCREAMING_SNAKE_CASE : Tuple = min(len(lowerCamelCase ), data_args.max_predict_samples ) _SCREAMING_SNAKE_CASE : Dict = predict_dataset.select(range(lowerCamelCase ) ) with training_args.main_process_first(desc='prediction dataset map pre-processing' ): _SCREAMING_SNAKE_CASE : Tuple = predict_dataset.map( lowerCamelCase, batched=lowerCamelCase, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on prediction dataset', ) # Get the metric function _SCREAMING_SNAKE_CASE : Tuple = evaluate.load('xnli' ) # You can define 
your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowerCamelCase ): _SCREAMING_SNAKE_CASE : Dict = p.predictions[0] if isinstance(p.predictions, lowerCamelCase ) else p.predictions _SCREAMING_SNAKE_CASE : int = np.argmax(lowerCamelCase, axis=1 ) return metric.compute(predictions=lowerCamelCase, references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _SCREAMING_SNAKE_CASE : List[str] = default_data_collator elif training_args.fpaa: _SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase, pad_to_multiple_of=8 ) else: _SCREAMING_SNAKE_CASE : Tuple = None # Initialize our Trainer _SCREAMING_SNAKE_CASE : Optional[Any] = Trainer( model=lowerCamelCase, args=lowerCamelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=lowerCamelCase, tokenizer=lowerCamelCase, data_collator=lowerCamelCase, ) # Training if training_args.do_train: _SCREAMING_SNAKE_CASE : List[Any] = None if training_args.resume_from_checkpoint is not None: _SCREAMING_SNAKE_CASE : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = last_checkpoint _SCREAMING_SNAKE_CASE : List[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = train_result.metrics _SCREAMING_SNAKE_CASE : str = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Tuple = min(lowerCamelCase, len(lowerCamelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train', lowerCamelCase ) trainer.save_metrics('train', lowerCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) _SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate(eval_dataset=lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = min(lowerCamelCase, len(lowerCamelCase ) ) trainer.log_metrics('eval', lowerCamelCase ) trainer.save_metrics('eval', lowerCamelCase ) # Prediction if training_args.do_predict: logger.info('*** Predict ***' ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = trainer.predict(lowerCamelCase, metric_key_prefix='predict' ) _SCREAMING_SNAKE_CASE : List[Any] = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = min(lowerCamelCase, len(lowerCamelCase ) ) trainer.log_metrics('predict', lowerCamelCase ) trainer.save_metrics('predict', lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = np.argmax(lowerCamelCase, axis=1 ) _SCREAMING_SNAKE_CASE : List[str] = os.path.join(training_args.output_dir, 'predictions.txt' ) if trainer.is_world_process_zero(): with open(lowerCamelCase, 'w' ) as writer: writer.write('index\tprediction\n' ) for index, item in enumerate(lowerCamelCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = label_list[item] writer.write(f"""{index}\t{item}\n""" ) if __name__ == "__main__": main()
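# A hedged invocation sketch for the script above; the model name and output
# directory are illustrative assumptions, the flags mirror the dataclasses.
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --output_dir /tmp/debug_xnli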
621
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase ={ """configuration_xlm_roberta_xl""": [ """XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaXLConfig""", """XLMRobertaXLOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =[ """XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaXLForCausalLM""", """XLMRobertaXLForMaskedLM""", """XLMRobertaXLForMultipleChoice""", """XLMRobertaXLForQuestionAnswering""", """XLMRobertaXLForSequenceClassification""", """XLMRobertaXLForTokenClassification""", """XLMRobertaXLModel""", """XLMRobertaXLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys __UpperCAmelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
261
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={ """openai/imagegpt-small""": """""", """openai/imagegpt-medium""": """""", """openai/imagegpt-large""": """""", } class lowerCAmelCase__ ( UpperCAmelCase_ ): lowercase__ : Dict = """imagegpt""" lowercase__ : str = ["""past_key_values"""] lowercase__ : Union[str, Any] = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , UpperCamelCase__=5_12 + 1 , UpperCamelCase__=32 * 32 , UpperCamelCase__=5_12 , UpperCamelCase__=24 , UpperCamelCase__=8 , UpperCamelCase__=None , UpperCamelCase__="quick_gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.02 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , **UpperCamelCase__ , ): '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = n_inner A__ = activation_function A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = scale_attn_weights A__ = use_cache A__ = scale_attn_by_inverse_layer_idx A__ = reorder_and_upcast_attn A__ = tie_word_embeddings super().__init__(tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ ) class lowerCAmelCase__ ( UpperCAmelCase_ ): @property def lowercase_ ( self ): '''simple docstring''' return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), ] ) def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = 1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = 3 , UpperCamelCase__ = 32 , UpperCamelCase__ = 32 , ): '''simple docstring''' A__ = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) A__ = dict(preprocessor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) ) return inputs
261
1
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , ): '''simple docstring''' lowercase__ : Any = parent lowercase__ : Optional[Any] = 13 lowercase__ : Any = 7 lowercase__ : Optional[Any] = 30 lowercase__ : int = self.seq_length + self.mem_len lowercase__ : str = 15 lowercase__ : int = True lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = 99 lowercase__ : Any = [10, 50, 80] lowercase__ : str = 32 lowercase__ : Tuple = 32 lowercase__ : int = 4 lowercase__ : Tuple = 8 lowercase__ : Optional[int] = 1_28 lowercase__ : Any = 2 lowercase__ : Optional[int] = 2 lowercase__ : List[Any] = None lowercase__ : Union[str, Any] = 1 lowercase__ : List[Any] = 0 lowercase__ : Union[str, Any] = 3 lowercase__ : Tuple = self.vocab_size - 1 lowercase__ : Union[str, Any] = 0.0_1 def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : List[str] = None if self.use_labels: lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ : int = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowercase__ ( self): '''simple docstring''' random.seed(self.seed) tf.random.set_seed(self.seed) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[str] = TFTransfoXLModel(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : List[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a} lowercase__ , lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE_).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Union[str, Any] = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE_) lowercase__ , lowercase__ : Dict = 
model(SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ : int = {"""input_ids""": input_ids_a, """labels""": lm_labels} lowercase__ , lowercase__ : str = model(SCREAMING_SNAKE_CASE_).to_tuple() lowercase__ , lowercase__ : Dict = model([input_ids_a, mems_a]).to_tuple() lowercase__ : Tuple = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} lowercase__ , lowercase__ : Dict = model(SCREAMING_SNAKE_CASE_).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : str = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE_) lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : Any = config_and_inputs lowercase__ : Any = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : List[Any] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __lowerCAmelCase : Union[str, Any] = () if is_tf_available() else () __lowerCAmelCase : Optional[int] = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __lowerCAmelCase : Any = False __lowerCAmelCase : Dict = False __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : List[Any] = False def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = TFTransfoXLModelTester(self) lowercase__ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , d_embed=37) def lowercase__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self): '''simple docstring''' self.model_tester.set_seed() lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' self.model_tester.set_seed() lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[str] = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: lowercase__ : Tuple = model.get_output_embeddings() assert isinstance(SCREAMING_SNAKE_CASE_ , tf.keras.layers.Layer) lowercase__ : List[Any] = model.get_bias() assert name is None else: lowercase__ : Union[str, Any] = model.get_output_embeddings() assert x is None lowercase__ : Optional[Any] = model.get_bias() assert name is None def lowercase__ ( self): '''simple docstring''' pass @slow def lowercase__ ( self): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Union[str, Any] = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE_) self.assertIsNotNone(SCREAMING_SNAKE_CASE_) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""") def lowercase__ ( self): '''simple docstring''' pass @require_tf class _snake_case ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""") @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""") # fmt: off lowercase__ : int = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase__ : Optional[int] = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase__ : List[Any] = model.generate(SCREAMING_SNAKE_CASE_ , max_length=2_00 , do_sample=SCREAMING_SNAKE_CASE_) self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_)
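# --- Added example (not part of the original test file) --------------------
# The shape assertions above exercise Transformer-XL's segment-level
# recurrence: the `mems` returned for one segment are fed back as cache for
# the next. A hedged sketch, assuming TF plus the transfo-xl-wt103 checkpoint
# already referenced by the slow test above are available:
import tensorflow as tf
from transformers import TFTransfoXLModel

model = TFTransfoXLModel.from_pretrained("transfo-xl-wt103")
segment_1 = tf.constant([[10, 11, 12, 13]])
segment_2 = tf.constant([[14, 15, 16, 17]])

out_1 = model(segment_1)                     # no memory on the first segment
out_2 = model(segment_2, mems=out_1.mems)    # reuse hidden states as memory
print(len(out_2.mems), out_2.mems[0].shape)  # one memory tensor per layer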
12
"""Project Euler problem 493: expected number of distinct colours drawn."""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls drawn from the urn."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
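% --- Added derivation (LaTeX sketch, not part of the original file) --------
% The one-liner above is linearity of expectation: each of the 7 colours
% contributes an indicator variable, and a colour is missing from a 20-ball
% draw exactly when all 20 balls come from the other 60.
\mathbb{E}[\text{distinct colours}]
    = \sum_{c=1}^{7} \Pr(\text{colour } c \text{ appears})
    = 7 \left( 1 - \frac{\binom{60}{20}}{\binom{70}{20}} \right)
    \approx 6.818741802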
667
0
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __magic_name__ = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model __magic_name__ = { # fairseq: '''wmt19-ru-en''': {'''length_penalty''': 1.1}, '''wmt19-en-ru''': {'''length_penalty''': 1.1_5}, '''wmt19-en-de''': {'''length_penalty''': 1.0}, '''wmt19-de-en''': {'''length_penalty''': 1.1}, # allenai: '''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6}, '''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6}, '''wmt16-en-de-12-1''': {'''length_penalty''': 0.8}, '''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6}, '''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6}, } # this remaps the different models to their organization names __magic_name__ = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __magic_name__ = '''facebook''' for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: __magic_name__ = '''allenai''' def __snake_case ( _UpperCAmelCase ): """simple docstring""" lowercase = dict((re.sub(R'@@$' , '' , _UpperCAmelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCAmelCase ), v) for k, v in d.items() ) lowercase = '<s> <pad> </s> <unk>'.split() # restore the special tokens for k in keep_keys: del da[f"""{k}</w>"""] lowercase = d[k] # restore return da def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" assert os.path.exists(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) print(f"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models lowercase = basename(_UpperCAmelCase ) lowercase = dirname(_UpperCAmelCase ) lowercase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel lowercase = cls.hub_models() lowercase = {'bpe': 'fastbpe', 'tokenizer': 'moses'} lowercase = '.' # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(f"""using checkpoint {checkpoint_file}""" ) lowercase = hub_utils.from_pretrained( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , archive_map=_UpperCAmelCase , **_UpperCAmelCase ) lowercase = vars(chkpt['args']['model'] ) lowercase = args['source_lang'] lowercase = args['target_lang'] lowercase = dirname(_UpperCAmelCase ) lowercase = basename(_UpperCAmelCase ) # dicts lowercase = os.path.join(_UpperCAmelCase , f"""dict.{src_lang}.txt""" ) lowercase = os.path.join(_UpperCAmelCase , f"""dict.{tgt_lang}.txt""" ) lowercase = Dictionary.load(_UpperCAmelCase ) lowercase = rewrite_dict_keys(src_dict.indices ) lowercase = len(_UpperCAmelCase ) lowercase = os.path.join(_UpperCAmelCase , 'vocab-src.json' ) print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab lowercase = True for k in src_vocab.keys(): if not k.islower(): lowercase = False break lowercase = Dictionary.load(_UpperCAmelCase ) lowercase = rewrite_dict_keys(tgt_dict.indices ) lowercase = len(_UpperCAmelCase ) lowercase = os.path.join(_UpperCAmelCase , 'vocab-tgt.json' ) print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # merges_file (bpecodes) lowercase = os.path.join(_UpperCAmelCase , VOCAB_FILES_NAMES['merges_file'] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" lowercase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ): break with open(_UpperCAmelCase , encoding='utf-8' ) as fin: lowercase = fin.read() lowercase = re.sub(R' \d+$' , '' , _UpperCAmelCase , 0 , re.M ) # remove frequency number print(f"""Generating {merges_file}""" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as fout: fout.write(_UpperCAmelCase ) # model config lowercase = os.path.join(_UpperCAmelCase , 'config.json' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}""" assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}""" lowercase = { 'architectures': ['FSMTForConditionalGeneration'], 'model_type': 'fsmt', 'activation_dropout': args['activation_dropout'], 'activation_function': 'relu', 'attention_dropout': args['attention_dropout'], 'd_model': args['decoder_embed_dim'], 'dropout': args['dropout'], 'init_std': 0.02, 'max_position_embeddings': args['max_source_positions'], 'num_hidden_layers': args['encoder_layers'], 'src_vocab_size': src_vocab_size, 'tgt_vocab_size': tgt_vocab_size, 'langs': [src_lang, tgt_lang], 'encoder_attention_heads': args['encoder_attention_heads'], 'encoder_ffn_dim': args['encoder_ffn_embed_dim'], 'encoder_layerdrop': args['encoder_layerdrop'], 'encoder_layers': args['encoder_layers'], 'decoder_attention_heads': args['decoder_attention_heads'], 'decoder_ffn_dim': args['decoder_ffn_embed_dim'], 'decoder_layerdrop': 
args['decoder_layerdrop'], 'decoder_layers': args['decoder_layers'], 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'is_encoder_decoder': True, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_all_embeddings'], } # good hparam defaults to start with lowercase = 5 lowercase = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: lowercase = best_score_hparams[model_dir]['length_penalty'] else: lowercase = 1.0 print(f"""Generating {fsmt_model_config_file}""" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # tokenizer config lowercase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) lowercase = { 'langs': [src_lang, tgt_lang], 'model_max_length': 10_24, 'do_lower_case': do_lower_case, } print(f"""Generating {fsmt_tokenizer_config_file}""" ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(_UpperCAmelCase , ensure_ascii=_UpperCAmelCase , indent=_UpperCAmelCase ) ) # model lowercase = chkpt['models'][0] lowercase = model.state_dict() # rename keys to start with 'model.' lowercase = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys lowercase = [ 'model.model', 'model.encoder.version', 'model.decoder.version', 'model.encoder_embed_tokens.weight', 'model.decoder_embed_tokens.weight', 'model.encoder.embed_positions._float_tensor', 'model.decoder.embed_positions._float_tensor', ] for k in ignore_keys: model_state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) lowercase = FSMTConfig.from_pretrained(_UpperCAmelCase ) lowercase = FSMTForConditionalGeneration(_UpperCAmelCase ) # check that it loads ok model_new.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) # save lowercase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) print(f"""Generating {pytorch_weights_dump_path}""" ) torch.save(_UpperCAmelCase , _UpperCAmelCase ) print('Conversion is done!' ) print('\nLast step is to upload the files to s3' ) print(f"""cd {data_root}""" ) print(f"""transformers-cli upload {model_dir}""" ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--fsmt_checkpoint_path''', default=None, type=str, required=True, help=( '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,''' ''' bpecodes, etc.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __magic_name__ = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
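# --- Added example (not part of the original script) -----------------------
# Toy demonstration of the vocab rewrite that rewrite_dict_keys performs
# above: fairseq marks word-internal BPE pieces with a trailing "@@", the
# target format marks word-final pieces with "</w>", and the special tokens
# are restored untouched. This is a self-contained copy of that function.
import re


def rewrite_dict_keys(d):
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    for k in "<s> <pad> </s> <unk>".split():  # restore the special tokens
        del d2[f"{k}</w>"]
        d2[k] = d[k]
    return d2


print(rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "low@@": 7, "est": 8}))
# {'low': 7, 'est</w>': 8, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}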
314
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
314
1
'''simple docstring''' import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = """hf-internal-testing/tiny-random-t5""" UpperCAmelCase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = tokenizer("""This is me""" , return_tensors="""pt""" ) UpperCAmelCase__ = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) UpperCAmelCase__ = model.generate(**_UpperCAmelCase ) UpperCAmelCase__ = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) UpperCAmelCase__ = model_reloaded.generate(**_UpperCAmelCase ) self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = """hf-internal-testing/tiny-random-t5""" UpperCAmelCase__ = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_UpperCAmelCase ): model.save_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = model.reverse_bettertransformer() model.save_pretrained(_UpperCAmelCase )
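# --- Added example (not part of the original test file) --------------------
# Hedged usage sketch of the round-trip the test above exercises; note the
# canonical transformers class is spelled AutoModelForSeq2SeqLM (the test
# body uses a mangled spelling). Requires optimum to be installed.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

name = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSeq2SeqLM.from_pretrained(name)

model = model.to_bettertransformer()        # swap in fused attention kernels
output = model.generate(**tokenizer("This is me", return_tensors="pt"))

model = model.reverse_bettertransformer()   # restore the canonical modules
model.save_pretrained("./t5-roundtrip")     # only the reverted model is saveable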
603
'''simple docstring''' import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase_ = logging.get_logger(__name__) class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = """mask2former""" lowerCAmelCase_ : List[Any] = ["""swin"""] lowerCAmelCase_ : Optional[int] = {"""hidden_size""": """hidden_dim"""} def __init__( self : str , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : int , ): """simple docstring""" if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" ) UpperCAmelCase__ = CONFIG_MAPPING["""swin"""]( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): UpperCAmelCase__ = backbone_config.pop("""model_type""" ) UpperCAmelCase__ = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase__ = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
''' f'''Supported model types: {','.join(self.backbones_supported )}''' ) UpperCAmelCase__ = backbone_config UpperCAmelCase__ = feature_size UpperCAmelCase__ = mask_feature_size UpperCAmelCase__ = hidden_dim UpperCAmelCase__ = encoder_feedforward_dim UpperCAmelCase__ = activation_function UpperCAmelCase__ = encoder_layers UpperCAmelCase__ = decoder_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = dropout UpperCAmelCase__ = dim_feedforward UpperCAmelCase__ = pre_norm UpperCAmelCase__ = enforce_input_projection UpperCAmelCase__ = common_stride UpperCAmelCase__ = ignore_value UpperCAmelCase__ = num_queries UpperCAmelCase__ = no_object_weight UpperCAmelCase__ = class_weight UpperCAmelCase__ = mask_weight UpperCAmelCase__ = dice_weight UpperCAmelCase__ = train_num_points UpperCAmelCase__ = oversample_ratio UpperCAmelCase__ = importance_sample_ratio UpperCAmelCase__ = init_std UpperCAmelCase__ = init_xavier_std UpperCAmelCase__ = use_auxiliary_loss UpperCAmelCase__ = feature_strides UpperCAmelCase__ = output_auxiliary_logits UpperCAmelCase__ = decoder_layers super().__init__(**_UpperCAmelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : int , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Tuple ): """simple docstring""" return cls( backbone_config=_UpperCAmelCase , **_UpperCAmelCase , ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ = self.backbone_config.to_dict() UpperCAmelCase__ = self.__class__.model_type return output
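# --- Added example (not part of the original config file) ------------------
# Hedged sketch of the classmethod defined at the end of the config above
# (released as Mask2FormerConfig.from_backbone_config): building the model
# config from an explicit Swin backbone config, mirroring the default
# backbone values the file constructs when none is given.
from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(
    image_size=224,
    embed_dim=96,
    depths=[2, 2, 18, 2],
    num_heads=[3, 6, 12, 24],
    out_features=["stage1", "stage2", "stage3", "stage4"],
)
config = Mask2FormerConfig.from_backbone_config(backbone, num_queries=100)
print(config.backbone_config.model_type)  # "swin"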
603
1
import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def _a ( lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _a ( lowercase__ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: SCREAMING_SNAKE_CASE__ : List[Any] = s_dict.pop(lowercase__ ) elif "subsample" in key: SCREAMING_SNAKE_CASE__ : Optional[Any] = s_dict.pop(lowercase__ ) def _a ( lowercase__ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = emb.weight.shape SCREAMING_SNAKE_CASE__ : int = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = emb.weight.data return lin_layer def _a ( lowercase__ : List[str] , lowercase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = torch.load(lowercase__ , map_location='cpu' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = mam_aaa['args'] SCREAMING_SNAKE_CASE__ : Dict = mam_aaa['model'] SCREAMING_SNAKE_CASE__ : List[str] = state_dict['decoder.output_projection.weight'] remove_ignore_keys_(lowercase__ ) rename_keys(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = state_dict['decoder.embed_tokens.weight'].shape[0] SCREAMING_SNAKE_CASE__ : Optional[int] = args.share_decoder_input_output_embed SCREAMING_SNAKE_CASE__ : Optional[int] = [int(lowercase__ ) for i in args.conv_kernel_sizes.split(',' )] SCREAMING_SNAKE_CASE__ : Union[str, Any] = SpeechaTextConfig( vocab_size=lowercase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(lowercase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=lowercase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=lowercase__ , num_beams=5 , max_length=2_00 , use_cache=lowercase__ , decoder_start_token_id=2 , early_stopping=lowercase__ , ) SCREAMING_SNAKE_CASE__ : Any = SpeechaTextForConditionalGeneration(lowercase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.model.load_state_dict(lowercase__ , strict=lowercase__ ) if len(lowercase__ ) > 0 and not set(lowercase__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' f''' but all the following weights are missing {missing}''' ) if tie_embeds: SCREAMING_SNAKE_CASE__ : Optional[int] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = lm_head_weights model.save_pretrained(lowercase__ ) if __name__ == "__main__": 
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
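# --- Added example (not part of the original script) -----------------------
# Minimal PyTorch sketch of the tying trick make_linear_from_emb implements
# above: the LM-head Linear reuses the decoder Embedding's weight tensor, so
# input and output embeddings share one set of parameters.
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data        # share storage, not a copy

emb.weight.data[0, 0] = 42.0
assert lm_head.weight[0, 0].item() == 42.0   # the tie is live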
636
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    """Call an attached accelerate offload hook's ``pre_forward`` before ``method``.

    If accelerate is unavailable, or older than 0.17.0, the method is returned
    unchanged.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
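# --- Added usage example (not part of the original module) -----------------
# Decorating a module's forward with the helper defined above: if accelerate
# has attached an offloading hook (self._hf_hook), its pre_forward runs
# first; otherwise the call is a plain passthrough.
import torch
from torch import nn


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)

    @apply_forward_hook
    def forward(self, x):
        return self.proj(x)


model = TinyModel()
print(model(torch.ones(1, 4)).shape)  # works with or without a hook attached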
636
1
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image a_ = ['text', 'image', 'audio'] def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : List[str] = [] for input_type in input_types: if input_type == "text": inputs.append("Text input") elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))) elif input_type == "audio": inputs.append(torch.ones(3000)) elif isinstance(_a , _a): inputs.append(create_inputs(_a)) else: raise ValueError(f"Invalid type requested: {input_type}") return inputs def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : List[Any] = [] for output in outputs: if isinstance(_a , (str, AgentText)): output_types.append("text") elif isinstance(_a , (Image.Image, AgentImage)): output_types.append("image") elif isinstance(_a , (torch.Tensor, AgentAudio)): output_types.append("audio") else: raise ValueError(f"Invalid output: {output}") return output_types @is_tool_test class _UpperCamelCase : '''simple docstring''' def __UpperCamelCase ( self : Dict ) -> List[Any]: """simple docstring""" self.assertTrue(hasattr(self.tool , "inputs" ) ) self.assertTrue(hasattr(self.tool , "outputs" ) ) SCREAMING_SNAKE_CASE : List[str] = self.tool.inputs for _input in inputs: if isinstance(_input , a ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) SCREAMING_SNAKE_CASE : Optional[Any] = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def __UpperCamelCase ( self : List[str] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE : Dict = self.tool(*a ) # There is a single output if len(self.tool.outputs ) == 1: SCREAMING_SNAKE_CASE : Union[str, Any] = [outputs] self.assertListEqual(output_types(a ) , self.tool.outputs ) def __UpperCamelCase ( self : List[str] ) -> int: """simple docstring""" self.assertTrue(hasattr(self.tool , "description" ) ) self.assertTrue(hasattr(self.tool , "default_checkpoint" ) ) self.assertTrue(self.tool.description.startswith("This is a tool that" ) ) def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE : Any = self.tool(*a ) if not isinstance(a , a ): SCREAMING_SNAKE_CASE : Dict = [outputs] self.assertEqual(len(a ) , len(self.tool.outputs ) ) for output, output_type in zip(a , self.tool.outputs ): SCREAMING_SNAKE_CASE : Any = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(a , a ) ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE : List[Any] = [] for _input, input_type in zip(a , self.tool.inputs ): if isinstance(a , a ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error SCREAMING_SNAKE_CASE : Tuple = self.tool(*a ) if not isinstance(a , a ): SCREAMING_SNAKE_CASE : List[str] = [outputs] self.assertEqual(len(a ) , len(self.tool.outputs ) )
25
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def __init__( self , *UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[Any]: super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = eval_examples lowerCamelCase : Optional[int] = post_process_function def _lowercase ( self , UpperCamelCase__ = None , UpperCamelCase__=None , UpperCamelCase__ = None , UpperCamelCase__ = "eval" , **UpperCamelCase__ , ) -> Dict[str, float]: lowerCamelCase : Dict = gen_kwargs.copy() lowerCamelCase : List[str] = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) lowerCamelCase : List[str] = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) lowerCamelCase : Optional[Any] = gen_kwargs lowerCamelCase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset lowerCamelCase : List[str] = self.get_eval_dataloader(UpperCamelCase__ ) lowerCamelCase : Optional[int] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase : Dict = self.compute_metrics lowerCamelCase : Any = None lowerCamelCase : Optional[int] = time.time() lowerCamelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCamelCase : Dict = eval_loop( UpperCamelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , ) finally: lowerCamelCase : Union[str, Any] = compute_metrics lowerCamelCase : Optional[Any] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCamelCase : List[str] = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : int = self.compute_metrics(UpperCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): lowerCamelCase : Any = metrics.pop(UpperCamelCase__ ) metrics.update(output.metrics ) else: lowerCamelCase : Tuple = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) lowerCamelCase : Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ ) return metrics def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__ = "test" , **UpperCamelCase__ ) -> int: lowerCamelCase : str = gen_kwargs.copy() lowerCamelCase : str = self.get_test_dataloader(UpperCamelCase__ ) # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase : Union[str, Any] = self.compute_metrics lowerCamelCase : int = None lowerCamelCase : Optional[int] = time.time() lowerCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowerCamelCase : Any = eval_loop( UpperCamelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , ) finally: lowerCamelCase : Tuple = compute_metrics lowerCamelCase : Optional[Any] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCamelCase : str = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , "predict" ) lowerCamelCase : Dict = self.compute_metrics(UpperCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): lowerCamelCase : int = metrics.pop(UpperCamelCase__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ )
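# --- Added example (not part of the original trainer file) -----------------
# Small runnable illustration of the speed_metrics helper both methods above
# rely on: it turns a wall-clock interval into *_runtime,
# *_samples_per_second and *_steps_per_second entries under a given prefix.
import time

from transformers.trainer_utils import speed_metrics

start = time.time()
time.sleep(0.1)  # stand-in for the evaluation loop
print(speed_metrics("eval", start, num_samples=32, num_steps=4))
# e.g. {'eval_runtime': 0.1003, 'eval_samples_per_second': 319.1,
#       'eval_steps_per_second': 39.9}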
311
0
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self: Dict , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: str=13 , _lowerCamelCase: Tuple=30 , _lowerCamelCase: str=2 , _lowerCamelCase: List[Any]=3 , _lowerCamelCase: Union[str, Any]=True , _lowerCamelCase: List[Any]=True , _lowerCamelCase: int=32 , _lowerCamelCase: Optional[int]=5 , _lowerCamelCase: int=4 , _lowerCamelCase: str=37 , _lowerCamelCase: Optional[Any]="gelu" , _lowerCamelCase: Tuple=0.1 , _lowerCamelCase: Optional[int]=0.1 , _lowerCamelCase: Optional[int]=10 , _lowerCamelCase: Any=0.02 , _lowerCamelCase: Union[str, Any]=None , _lowerCamelCase: Tuple=2 , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_ = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE_ = num_patches + 1 def _A ( self: Tuple ): SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, labels def _A ( self: Tuple ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _A ( self: Optional[Any] , _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: Union[str, Any] ): SCREAMING_SNAKE_CASE_ = ViTModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def _A ( self: List[str] , _lowerCamelCase: List[str] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[Any] ): SCREAMING_SNAKE_CASE_ = ViTForMaskedImageModeling(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = ViTForMaskedImageModeling(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _A ( self: Tuple , _lowerCamelCase: List[Any] , _lowerCamelCase: int , _lowerCamelCase: Dict ): SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size SCREAMING_SNAKE_CASE_ = ViTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = ViTForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A ( self: str ): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : str = ( {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Optional[int] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : List[Any] = False SCREAMING_SNAKE_CASE__ : str = False def _A ( self: Union[str, Any] ): SCREAMING_SNAKE_CASE_ = ViTModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def _A ( self: Optional[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def _A ( self: Optional[int] ): pass def _A ( self: Optional[int] ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) ) def _A ( self: List[str] ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _A ( self: str ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _A ( self: int ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase ) def _A ( self: Tuple ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _A ( self: int ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ = ViTModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def a (): SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): '''simple docstring''' @cached_property def _A ( self: int ): return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def _A ( self: Optional[Any] ): SCREAMING_SNAKE_CASE_ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**_lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE_ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow def _A ( self: Union[str, Any] ): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
SCREAMING_SNAKE_CASE_ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(_lowerCamelCase ) SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 ) SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE_ = inputs.pixel_values.to(_lowerCamelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase ) # verify the logits SCREAMING_SNAKE_CASE_ = torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def _A ( self: Optional[int] ): SCREAMING_SNAKE_CASE_ = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE_ = inputs.pixel_values.to(_lowerCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase )
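# Illustrative sketch, not part of the tests above: the typical way the
# `interpolate_pos_encoding` flag is used to run a ViT checkpoint at a
# resolution other than its pre-training resolution. Checkpoint and processor
# names mirror the DINO test above; treat this as an assumption-laden usage
# sketch rather than an exact API contract.
#
# from transformers import ViTImageProcessor, ViTModel
# processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
# model = ViTModel.from_pretrained("facebook/dino-vits8")
# inputs = processor(images=image, return_tensors="pt")
# outputs = model(**inputs, interpolate_pos_encoding=True)  # accepts 480x480 inputs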
def merge_sort(collection: list) -> list:
    """Repeatedly pull the current min and max off the list, building the
    sorted result from both ends (a min/max selection sort)."""
    start, end = [], []
    while len(collection) > 1:
        min_value, max_value = min(collection), max(collection)
        start.append(min_value)
        end.append(max_value)
        collection.remove(min_value)
        collection.remove(max_value)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
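# Worked example for merge_sort above: [5, 3, 1, 4, 2] yields
# start = [1, 2], end = [5, 4] -> reversed to [4, 5], leftover [3],
# so the result is [1, 2, 3, 4, 5].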
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = { """MIT/ast-finetuned-audioset-10-10-0.4593""": ( """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json""" ), } class UpperCAmelCase_ ( __lowerCamelCase ): __lowerCamelCase = 'audio-spectrogram-transformer' def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=1e-12 , _lowerCAmelCase=16 , _lowerCAmelCase=True , _lowerCAmelCase=10 , _lowerCAmelCase=10 , _lowerCAmelCase=1024 , _lowerCAmelCase=128 , **_lowerCAmelCase , ): super().__init__(**_lowerCAmelCase ) UpperCAmelCase__ : Optional[int] = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : List[Any] = num_attention_heads UpperCAmelCase__ : Dict = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Tuple = initializer_range UpperCAmelCase__ : Dict = layer_norm_eps UpperCAmelCase__ : Optional[Any] = patch_size UpperCAmelCase__ : Tuple = qkv_bias UpperCAmelCase__ : Tuple = frequency_stride UpperCAmelCase__ : Union[str, Any] = time_stride UpperCAmelCase__ : Optional[Any] = max_length UpperCAmelCase__ : Optional[int] = num_mel_bins
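# Minimal sketch (an assumption, not part of the config above) of how the
# frequency/time strides plus the patch size determine the number of
# spectrogram patches a ViT-style audio model sees. The sliding-window
# formula is the usual convolution output-size computation.
def _ast_num_patches(max_length=1024, num_mel_bins=128, patch_size=16,
                     frequency_stride=10, time_stride=10):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out

# _ast_num_patches() == 12 * 101 == 1212 patches for the default values above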
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Any = False, False, False @dataclass class snake_case_ : '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) __UpperCamelCase = field(default='''Audio''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_ ) def __call__( self : Optional[Any] ) -> List[str]: '''simple docstring''' return self.pa_type def UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, bytes, dict] ) -> dict: '''simple docstring''' try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err if isinstance(__lowerCamelCase , __lowerCamelCase ): return {"bytes": None, "path": value} elif isinstance(__lowerCamelCase , __lowerCamelCase ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes __lowercase = BytesIO() sf.write(__lowerCamelCase , value['array'] , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('pcm' ): # "PCM" only has raw audio bytes if value.get('sampling_rate' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' ) if value.get('bytes' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) __lowercase = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32_767 else: __lowercase = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32_767 __lowercase = BytesIO(bytes() ) sf.write(__lowerCamelCase , __lowerCamelCase , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def UpperCAmelCase ( self : Any , __lowerCamelCase : dict , __lowerCamelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: '''simple docstring''' if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' 
) __lowercase , __lowercase = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None) if path is None and file is None: raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err __lowercase = xsplitext(__lowerCamelCase )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( 'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ' 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( 'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ' 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) if file is None: __lowercase = token_per_repo_id or {} __lowercase = path.split('::' )[-1] try: __lowercase = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )['repo_id'] __lowercase = token_per_repo_id[repo_id] except (ValueError, KeyError): __lowercase = None with xopen(__lowerCamelCase , 'rb' , use_auth_token=__lowerCamelCase ) as f: __lowercase , __lowercase = sf.read(__lowerCamelCase ) else: __lowercase , __lowercase = sf.read(__lowerCamelCase ) __lowercase = array.T if self.mono: __lowercase = librosa.to_mono(__lowerCamelCase ) if self.sampling_rate and self.sampling_rate != sampling_rate: __lowercase = librosa.resample(__lowerCamelCase , orig_sr=__lowerCamelCase , target_sr=self.sampling_rate ) __lowercase = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCAmelCase ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value if self.decode: raise ValueError('Cannot flatten a decoded Audio feature.' 
) return { "bytes": Value('binary' ), "path": Value('string' ), } def UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type ): __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) __lowercase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) __lowercase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ): __lowercase = pa.array([Audio().encode_example(__lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: __lowercase = storage.field('bytes' ) else: __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: __lowercase = storage.field('path' ) else: __lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() ) __lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) return array_cast(__lowerCamelCase , self.pa_type ) def UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : pa.StructArray ) -> pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(__lowerCamelCase : Any ): with xopen(__lowerCamelCase , 'rb' ) as f: __lowercase = f.read() return bytes_ __lowercase = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __lowercase = pa.array( [os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , ) __lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() ) return array_cast(__lowerCamelCase , self.pa_type )
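# A minimal, self-contained sketch of the PCM normalization used by the
# encoder above: 16-bit PCM samples are reinterpreted as int16 and scaled
# into [-1.0, 1.0] by dividing by 32767.
import numpy as np

_pcm_bytes = np.array([0, 16_384, -16_384, 32_767], dtype=np.int16).tobytes()
_samples = np.frombuffer(_pcm_bytes, dtype=np.int16).astype(np.float32) / 32_767
# _samples is approximately [0.0, 0.5, -0.5, 1.0]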
"""simple docstring""" def _lowerCamelCase ( __a, __a, __a ): if exponent == 1: return base if exponent % 2 == 0: SCREAMING_SNAKE_CASE_ = _modexpt(__a, exponent // 2, __a ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(__a, exponent - 1, __a )) % modulo_value def _lowerCamelCase ( __a = 1_777, __a = 1_855, __a = 8 ): SCREAMING_SNAKE_CASE_ = base for _ in range(1, __a ): SCREAMING_SNAKE_CASE_ = _modexpt(__a, __a, 10**digits ) return result if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" def _lowerCamelCase ( __a ): if not isinstance(__a, __a ): SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer' raise TypeError(__a ) if number < 1: SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0' raise ValueError(__a ) SCREAMING_SNAKE_CASE_ = 1 for i in range(1, __a ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a : Optional[Any] = logging.get_logger(__name__) a : Dict = {'''vocab_file''': '''sentencepiece.model'''} a : Tuple = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, } a : str = { '''google/rembert''': 256, } class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[Any] , a_ : int , a_ : Any=False , a_ : List[Any]=True , a_ : List[Any]=True , a_ : List[Any]="[CLS]" , a_ : List[Any]="[SEP]" , a_ : List[Any]="[UNK]" , a_ : str="[SEP]" , a_ : List[str]="[PAD]" , a_ : Optional[int]="[CLS]" , a_ : List[str]="[MASK]" , **a_ : str , ): """simple docstring""" super().__init__( do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , **a_ , ) __snake_case = do_lower_case __snake_case = remove_space __snake_case = keep_accents __snake_case = vocab_file __snake_case = spm.SentencePieceProcessor() self.sp_model.Load(a_ ) @property def A ( self : Optional[Any] ): """simple docstring""" return len(self.sp_model ) def A ( self : Optional[Any] ): """simple docstring""" __snake_case = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): """simple docstring""" __snake_case = self.__dict__.copy() __snake_case = None return state def __setstate__( self : str , a_ : Optional[int] ): """simple docstring""" __snake_case = d __snake_case = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def A ( self : Tuple , a_ : Optional[int] , a_ : int=False ): """simple docstring""" __snake_case = self.sp_model.EncodeAsPieces(a_ ) return pieces def A ( self : Any , a_ : Optional[Any] ): """simple docstring""" return self.sp_model.PieceToId(a_ ) def A ( self : Optional[Any] , a_ : List[str] ): """simple docstring""" return self.sp_model.IdToPiece(a_ ) def A ( self : Optional[Any] , a_ : int ): """simple docstring""" __snake_case = self.sp_model.decode_pieces(a_ ) return out_string def A ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def A ( self : List[str] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1] def A ( self : Tuple , a_ : List[int] , a_ : Optional[List[int]] = None ): """simple docstring""" __snake_case = [self.sep_token_id] __snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A ( self : List[Any] , a_ : str , a_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(a_ ): logger.error("Vocabulary path ({}) should be a directory".format(a_ ) ) return __snake_case = os.path.join( a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
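# Layout produced by the special-token helpers above:
# single sequence: [CLS] A [SEP]          -> token_type_ids: 0 ... 0
# sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over A's span, 1s over B's span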
from sklearn.metrics import fa_score import datasets SCREAMING_SNAKE_CASE : List[str] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" SCREAMING_SNAKE_CASE : Optional[int] = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" SCREAMING_SNAKE_CASE : Tuple = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=1 , UpperCamelCase_="binary" , UpperCamelCase_=None ): lowercase_ :List[Any] = fa_score( UpperCamelCase_ , UpperCamelCase_ , labels=UpperCamelCase_ , pos_label=UpperCamelCase_ , average=UpperCamelCase_ , sample_weight=UpperCamelCase_ ) return {"f1": float(UpperCamelCase_ ) if score.size == 1 else score}
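# Hand-worked version of Example 1 from the docstring above:
# references [0, 1, 0, 1, 0] vs predictions [0, 0, 1, 1, 0] give one true
# positive, one false positive and one false negative, so
# precision = recall = 0.5 and F1 = 2 * (0.5 * 0.5) / (0.5 + 0.5) = 0.5.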
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCamelCase : '''simple docstring''' def __init__( self , UpperCamelCase_ , ): lowercase_ :Dict = parent lowercase_ :Optional[Any] = 13 lowercase_ :Optional[Any] = 7 lowercase_ :List[Any] = 30 lowercase_ :int = self.seq_length + self.mem_len lowercase_ :Any = 15 lowercase_ :Optional[Any] = True lowercase_ :List[Any] = True lowercase_ :Any = 99 lowercase_ :Optional[int] = [10, 50, 80] lowercase_ :Union[str, Any] = 32 lowercase_ :List[Any] = 32 lowercase_ :Tuple = 4 lowercase_ :Tuple = 8 lowercase_ :List[Any] = 128 lowercase_ :Any = 2 lowercase_ :Tuple = 2 lowercase_ :Dict = None lowercase_ :Optional[Any] = 1 lowercase_ :Optional[int] = 0 lowercase_ :List[str] = 3 lowercase_ :Optional[int] = self.vocab_size - 1 lowercase_ :List[Any] = 0.01 def UpperCamelCase ( self ): lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ :int = None if self.use_labels: lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase_ :int = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def UpperCamelCase ( self ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): lowercase_ :Any = TFTransfoXLModel(UpperCamelCase_ ) lowercase_ , lowercase_ :List[Any] = model(UpperCamelCase_ ).to_tuple() lowercase_ :Dict = {'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase_ , lowercase_ :int = model(UpperCamelCase_ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): lowercase_ :Any = TFTransfoXLLMHeadModel(UpperCamelCase_ ) lowercase_ , lowercase_ :int = model(UpperCamelCase_ ).to_tuple() lowercase_ :Optional[int] = {'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase_ , lowercase_ :Optional[int] = model(UpperCamelCase_ ).to_tuple() lowercase_ , lowercase_ :Tuple = model([input_ids_a, mems_a] ).to_tuple() lowercase_ 
:Union[str, Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase_ , lowercase_ :Union[str, Any] = model(UpperCamelCase_ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): lowercase_ :int = TFTransfoXLForSequenceClassification(UpperCamelCase_ ) lowercase_ :int = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self ): lowercase_ :str = self.prepare_config_and_inputs() ((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) :Tuple = config_and_inputs lowercase_ :Dict = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' lowercase : str =( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowercase : Dict =() if is_tf_available() else () lowercase : List[str] =( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowercase : Optional[int] =False lowercase : Tuple =False lowercase : Dict =False lowercase : Dict =False def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def UpperCamelCase ( self ): lowercase_ :Union[str, Any] = TFTransfoXLModelTester(self ) lowercase_ :str = ConfigTester(self , config_class=UpperCamelCase_ , d_embed=37 ) def UpperCamelCase ( self ): self.config_tester.run_common_tests() def UpperCamelCase ( self ): self.model_tester.set_seed() lowercase_ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCamelCase_ ) def UpperCamelCase ( self ): self.model_tester.set_seed() lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCamelCase_ ) def UpperCamelCase ( self ): lowercase_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCamelCase_ ) def UpperCamelCase ( self ): lowercase_ , lowercase_ :Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ :List[str] = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase_ :Dict = model_class(UpperCamelCase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase_ :str = model.get_output_embeddings() assert isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) lowercase_ :Optional[int] = model.get_bias() assert name is None else: lowercase_ :List[Any] = model.get_output_embeddings() assert x is None lowercase_ :Dict = model.get_bias() assert name is None def UpperCamelCase ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def UpperCamelCase ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ :Dict = TFTransfoXLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def UpperCamelCase ( self ): pass @require_tf class UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def UpperCamelCase ( self ): lowercase_ :Any = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase_ :List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . 
Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase_ :List[Any] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase_ :Any = model.generate(UpperCamelCase_ , max_length=200 , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase_ )
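# Sketch (assumed from the call pattern in the tests above, not asserted) of
# the memory mechanism these tests exercise: Transformer-XL returns `mems`
# that are fed back in so later segments attend over cached hidden states.
#
# hidden, mems = model(input_ids_segment_1).to_tuple()
# hidden, mems = model({"input_ids": input_ids_segment_2, "mems": mems}).to_tuple()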
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowercase_ ( __UpperCAmelCase ) -> int: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4E_00 and cp <= 0X9F_FF) or (cp >= 0X34_00 and cp <= 0X4D_BF) # or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) # or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) # or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) # or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) # or (cp >= 0XF9_00 and cp <= 0XFA_FF) or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) # ): # return True return False def lowercase_ ( __UpperCAmelCase ) -> Optional[Any]: # word like '180' or '身高' or '神' for char in word: lowerCAmelCase__ : Optional[int] = ord(__UpperCAmelCase ) if not _is_chinese_char(__UpperCAmelCase ): return 0 return 1 def lowercase_ ( __UpperCAmelCase ) -> str: lowerCAmelCase__ : Union[str, Any] = set() for token in tokens: lowerCAmelCase__ : Dict = len(__UpperCAmelCase ) > 1 and is_chinese(__UpperCAmelCase ) if chinese_word: word_set.add(__UpperCAmelCase ) lowerCAmelCase__ : Any = list(__UpperCAmelCase ) return word_list def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: if not chinese_word_set: return bert_tokens lowerCAmelCase__ : Union[str, Any] = max([len(__UpperCAmelCase ) for w in chinese_word_set] ) lowerCAmelCase__ : List[str] = bert_tokens lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, len(__UpperCAmelCase ) while start < end: lowerCAmelCase__ : Any = True if is_chinese(bert_word[start] ): lowerCAmelCase__ : int = min(end - start , __UpperCAmelCase ) for i in range(__UpperCAmelCase , 1 , -1 ): lowerCAmelCase__ : Any = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase__ : List[Any] = """##""" + bert_word[j] lowerCAmelCase__ : Tuple = start + i lowerCAmelCase__ : int = False break if single_word: start += 1 return bert_word def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = [] for i in range(0 , len(__UpperCAmelCase ) , 100 ): lowerCAmelCase__ : List[str] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCAmelCase__ : Dict = [get_chinese_word(__UpperCAmelCase ) for r in res] ltp_res.extend(__UpperCAmelCase ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [] for i in range(0 , len(__UpperCAmelCase ) , 100 ): lowerCAmelCase__ : Optional[int] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] for input_ids, chinese_word in zip(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = [] for id in input_ids: lowerCAmelCase__ : Union[str, Any] = bert_tokenizer._convert_id_to_token(__UpperCAmelCase ) input_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = 
add_sub_symbol(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(__UpperCAmelCase ): if token[:2] == "##": lowerCAmelCase__ : Optional[Any] = token[2:] # save chinese tokens' pos if len(__UpperCAmelCase ) == 1 and _is_chinese_char(ord(__UpperCAmelCase ) ): ref_id.append(__UpperCAmelCase ) ref_ids.append(__UpperCAmelCase ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ) return ref_ids def lowercase_ ( __UpperCAmelCase ) -> List[Any]: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase__ : int = f.readlines() lowerCAmelCase__ : List[Any] = [line.strip() for line in data if len(__UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase__ : Dict = LTP(args.ltp ) # faster in GPU device lowerCAmelCase__ : int = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase__ : int = prepare_ref(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCAmelCase__ : Union[str, Any] = [json.dumps(__UpperCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__UpperCAmelCase ) if __name__ == "__main__": _A = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", required=False, type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", required=False, type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""", ) parser.add_argument( """--bert""", required=False, type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""", ) parser.add_argument( """--save_path""", required=False, type=str, default="""./resources/ref.txt""", help="""path to save res""", ) _A = parser.parse_args() main(args)
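# Illustration of the whole-word-masking markup produced by add_sub_symbol
# above: if the LTP segmenter groups two characters into one word, the
# corresponding BERT tokens ["X", "Y"] are rewritten to ["X", "##Y"], so the
# whole word is masked together during MLM training.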
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class _lowerCamelCase ( unittest.TestCase ): def _lowerCAmelCase ( self : Optional[Any] ) -> Dict: """simple docstring""" lowerCAmelCase__ : Optional[int] = ["""a""", """b""", """c"""] # Defaults to last layer if both are None lowerCAmelCase__ , lowerCAmelCase__ : str = get_aligned_output_features_output_indices(UpperCamelCase , UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , ["""c"""] ) self.assertEqual(UpperCamelCase , [2] ) # Out indices set to match out features lowerCAmelCase__ , lowerCAmelCase__ : int = get_aligned_output_features_output_indices(["""a""", """c"""] , UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , ["""a""", """c"""] ) self.assertEqual(UpperCamelCase , [0, 2] ) # Out features set to match out indices lowerCAmelCase__ , lowerCAmelCase__ : int = get_aligned_output_features_output_indices(UpperCamelCase , [0, 2] , UpperCamelCase ) self.assertEqual(UpperCamelCase , ["""a""", """c"""] ) self.assertEqual(UpperCamelCase , [0, 2] ) # Out features selected from negative indices lowerCAmelCase__ , lowerCAmelCase__ : Tuple = get_aligned_output_features_output_indices(UpperCamelCase , [-3, -1] , UpperCamelCase ) self.assertEqual(UpperCamelCase , ["""a""", """c"""] ) self.assertEqual(UpperCamelCase , [-3, -1] ) def _lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" # Stage names must be set with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , UpperCamelCase ) # Out features must be a list with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(UpperCamelCase , 0 , ["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(UpperCamelCase , (0, 1) , ["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] ) # Out features should match out indices with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(UpperCamelCase ): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] ) def _lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : str = BackboneMixin() lowerCAmelCase__ : str = ["""a""", """b""", """c"""] lowerCAmelCase__ : List[str] = ["""a""", """c"""] lowerCAmelCase__ : Union[str, Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly lowerCAmelCase__ : List[str] = ["""a""", """b"""] 
self.assertEqual(backbone.out_features , ["""a""", """b"""] ) self.assertEqual(backbone.out_indices , [0, 1] ) lowerCAmelCase__ : int = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [-3, -1] )
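# The invariant enforced above, in short: with stage names ["a", "b", "c"],
# out_features ["a", "c"] must align with out_indices [0, 2] (or the
# equivalent negative indices [-3, -1]); mismatched lengths, orders, or
# unknown stage names raise a ValueError.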
"""simple docstring""" from __future__ import annotations class __UpperCAmelCase : def __init__( self : str , a_ : Optional[int]=None ) -> Optional[int]: '''simple docstring''' a__ : str = data a__ : Optional[int] = None def __repr__( self : str ) -> List[str]: '''simple docstring''' a__ : int = [] a__ : List[str] = self while temp: string_rep.append(F"{temp.data}" ) a__ : Any = temp.next return "->".join(a_ ) def lowercase__ ( lowerCAmelCase__ : list ) -> int: '''simple docstring''' if not elements_list: raise Exception("The Elements List is empty" ) a__ : List[str] = Node(elements_list[0] ) for i in range(1 , len(lowerCAmelCase__ ) ): a__ : int = Node(elements_list[i] ) a__ : int = current.next return head def lowercase__ ( lowerCAmelCase__ : Node ) -> None: '''simple docstring''' if head_node is not None and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): print_reverse(head_node.next ) print(head_node.data ) def lowercase__ ( ) -> Union[str, Any]: '''simple docstring''' from doctest import testmod testmod() a__ : Optional[int] = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print("Linked List:" ) print(lowerCAmelCase__ ) print("Elements in Reverse:" ) print_reverse(lowerCAmelCase__ ) if __name__ == "__main__": main()
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = '''▁''' __UpperCAmelCase = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __UpperCAmelCase = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } __UpperCAmelCase = { '''facebook/m2m100_418M''': 1024, } # fmt: off __UpperCAmelCase = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class __UpperCAmelCase ( _UpperCamelCase ): __lowerCamelCase : str = VOCAB_FILES_NAMES __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = ["input_ids", "attention_mask"] __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self : Any , a_ : Any , a_ : int , a_ : int=None , a_ : Union[str, Any]=None , a_ : Optional[Any]="<s>" , a_ : Tuple="</s>" , a_ : int="</s>" , a_ : Optional[int]="<pad>" , a_ : List[Any]="<unk>" , a_ : Tuple="m2m100" , a_ : Optional[Dict[str, Any]] = None , a_ : Optional[Any]=8 , **a_ : Union[str, Any] , ) -> None: '''simple docstring''' a__ : int = {} if sp_model_kwargs is None else sp_model_kwargs a__ : List[str] = language_codes a__ : int = FAIRSEQ_LANGUAGE_CODES[language_codes] a__ : Tuple = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code} a__ : Optional[Any] = kwargs.get("additional_special_tokens" , [] ) 
kwargs["additional_special_tokens"] += [ self.get_lang_token(a_ ) for lang_code in fairseq_language_code if self.get_lang_token(a_ ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=a_ , tgt_lang=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , unk_token=a_ , pad_token=a_ , language_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a_ , **a_ , ) a__ : List[str] = vocab_file a__ : Optional[int] = load_json(a_ ) a__ : List[Any] = {v: k for k, v in self.encoder.items()} a__ : List[Any] = spm_file a__ : Any = load_spm(a_ , self.sp_model_kwargs ) a__ : Tuple = len(self.encoder ) a__ : Any = { self.get_lang_token(a_ ): self.encoder_size + i for i, lang_code in enumerate(a_ ) } a__ : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a_ )} a__ : Any = {v: k for k, v in self.lang_token_to_id.items()} a__ : Union[str, Any] = src_lang if src_lang is not None else "en" a__ : Union[str, Any] = tgt_lang a__ : List[Any] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) a__ : Optional[int] = num_madeup_words @property def UpperCAmelCase ( self : Dict ) -> int: '''simple docstring''' return len(self.encoder ) + len(self.lang_token_to_id ) @property def UpperCAmelCase ( self : Any ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def UpperCAmelCase ( self : List[Any] , a_ : str ) -> None: '''simple docstring''' a__ : List[str] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase ( self : Tuple , a_ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(a_ , out_type=a_ ) def UpperCAmelCase ( self : List[Any] , a_ : Optional[int] ) -> Any: '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(a_ , self.encoder[self.unk_token] ) def UpperCAmelCase ( self : str , a_ : int ) -> str: '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(a_ , self.unk_token ) def UpperCAmelCase ( self : Dict , a_ : Optional[int] ) -> Optional[int]: '''simple docstring''' a__ : Optional[Any] = [] a__ : Optional[int] = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(a_ ) + token a__ : List[str] = [] else: current_sub_tokens.append(a_ ) out_string += self.sp_model.decode(a_ ) return out_string.strip() def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) a__ : Any = [1] * len(self.prefix_tokens ) a__ : Optional[int] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(a_ )) + suffix_ones return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones def UpperCAmelCase ( self : Union[str, Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase ( self : str ) -> Dict: '''simple docstring''' a__ : int = {self.convert_ids_to_tokens(a_ ): i for i in 
range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Dict: '''simple docstring''' a__ : Tuple = self.__dict__.copy() a__ : Optional[int] = None return state def __setstate__( self : List[str] , a_ : Dict ) -> None: '''simple docstring''' a__ : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a__ : List[Any] = {} a__ : Optional[int] = load_spm(self.spm_file , self.sp_model_kwargs ) def UpperCAmelCase ( self : List[Any] , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' a__ : Dict = Path(a_ ) if not save_dir.is_dir(): raise OSError(F"{save_directory} should be a directory" ) a__ : Union[str, Any] = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) a__ : Tuple = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder , a_ ) if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , a_ ) elif not os.path.isfile(self.spm_file ): with open(a_ , "wb" ) as fi: a__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(a_ ) return (str(a_ ), str(a_ )) def UpperCAmelCase ( self : Any , a_ : List[str] , a_ : str = "en" , a_ : Optional[List[str]] = None , a_ : str = "ro" , **a_ : Dict , ) -> BatchEncoding: '''simple docstring''' a__ : str = src_lang a__ : Any = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(a_ , a_ , **a_ ) def UpperCAmelCase ( self : Optional[Any] , a_ : Dict , a_ : Optional[str] , a_ : Optional[str] , **a_ : Tuple ) -> str: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) a__ : List[Any] = src_lang a__ : Optional[int] = self(a_ , add_special_tokens=a_ , **a_ ) a__ : Any = self.get_lang_id(a_ ) a__ : int = tgt_lang_id return inputs def UpperCAmelCase ( self : Any ) -> Optional[Any]: '''simple docstring''' self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase ( self : Optional[int] ) -> Tuple: '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase ( self : Union[str, Any] , a_ : str ) -> None: '''simple docstring''' a__ : Optional[int] = self.get_lang_token(a_ ) a__ : Tuple = self.lang_token_to_id[lang_token] a__ : List[str] = [self.cur_lang_id] a__ : Optional[int] = [self.eos_token_id] def UpperCAmelCase ( self : List[str] , a_ : str ) -> None: '''simple docstring''' a__ : Optional[int] = self.get_lang_token(a_ ) a__ : int = self.lang_token_to_id[lang_token] a__ : Tuple = [self.cur_lang_id] a__ : Optional[int] = [self.eos_token_id] def UpperCAmelCase ( self : Any , a_ : str ) -> str: '''simple docstring''' return self.lang_code_to_token[lang] def UpperCAmelCase ( self : List[str] , a_ : str ) -> int: '''simple docstring''' a__ : List[str] = self.get_lang_token(a_ ) return self.lang_token_to_id[lang_token] def lowercase__ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: '''simple docstring''' a__ : Any = sentencepiece.SentencePieceProcessor(**lowerCAmelCase__ ) spm.Load(str(lowerCAmelCase__ ) ) return spm def lowercase__ ( lowerCAmelCase__ : str ) -> Union[Dict, List]: '''simple docstring''' with open(lowerCAmelCase__ , "r" ) as f: return json.load(lowerCAmelCase__ ) def lowercase__ ( lowerCAmelCase__ : Union[str, Any] , 
lowerCAmelCase__ : str ) -> None: '''simple docstring''' with open(lowerCAmelCase__ , "w" ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=2 )
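# Hypothetical usage sketch for the tokenizer above (the checkpoint name and
# generation kwargs are assumptions for illustration, not asserted here):
#
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
# encoded = tokenizer("Hello world", return_tensors="pt")
# generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))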
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowercase__ : List[str] = 'hf-internal-testing/tiny-random-bert' lowercase__ : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert') lowercase__ : Union[str, Any] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6' class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(SCREAMING_SNAKE_CASE_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''refs''' , '''main''' ) ) as f: _UpperCamelCase = f.read() self.assertEqual(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''snapshots''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(os.path.isfile(SCREAMING_SNAKE_CASE_ ) ) # File is cached at the same place the second time. _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Using a specific revision to test the full commit hash. _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , revision='''9b8c223''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''snapshots''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def snake_case__ ( self : Optional[Any] ) -> str: '''simple docstring''' with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , '''is not a valid model identifier''' ): _UpperCamelCase = cached_file('''tiny-random-bert''' , SCREAMING_SNAKE_CASE_ ) with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , '''is not a valid git identifier''' ): _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , revision='''aaaa''' ) with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , '''does not appear to have a file named''' ): _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , '''conf''' ) def snake_case__ ( self : Dict ) -> Any: '''simple docstring''' with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , '''does not appear to have a file named''' ): _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , '''conf''' ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''refs''' , '''main''' ) ) as f: _UpperCamelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , '''.no_exist''' , SCREAMING_SNAKE_CASE_ , '''conf''' ) ) ) _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , '''conf''' , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE_ ) self.assertIsNone(SCREAMING_SNAKE_CASE_ ) _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , '''conf''' , local_files_only=SCREAMING_SNAKE_CASE_ , _raise_exceptions_for_missing_entries=SCREAMING_SNAKE_CASE_ ) self.assertIsNone(SCREAMING_SNAKE_CASE_ ) _UpperCamelCase = mock.Mock() _UpperCamelCase = 500 _UpperCamelCase = {} _UpperCamelCase = HTTPError _UpperCamelCase = {} # Under the mock environment we get a 500 error when 
trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE_ ) as mock_head: _UpperCamelCase = cached_file(SCREAMING_SNAKE_CASE_ , '''conf''' , _raise_exceptions_for_connection_errors=SCREAMING_SNAKE_CASE_ ) self.assertIsNone(SCREAMING_SNAKE_CASE_ ) # This check we did call the fake head request mock_head.assert_called() def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , SCREAMING_SNAKE_CASE_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , SCREAMING_SNAKE_CASE_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , SCREAMING_SNAKE_CASE_ ) ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , SCREAMING_SNAKE_CASE_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , SCREAMING_SNAKE_CASE_ , revision='''ahaha''' ) _UpperCamelCase = get_file_from_repo('''bert-base-cased''' , SCREAMING_SNAKE_CASE_ ) # The name is the cached name which is not very easy to test, so instead we load the content. _UpperCamelCase = json.loads(open(SCREAMING_SNAKE_CASE_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def snake_case__ ( self : Tuple ) -> List[Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = Path(SCREAMING_SNAKE_CASE_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(SCREAMING_SNAKE_CASE_ , '''a.txt''' ) , str(SCREAMING_SNAKE_CASE_ ) ) self.assertIsNone(get_file_from_repo(SCREAMING_SNAKE_CASE_ , '''b.txt''' ) )
98
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class _A ( unittest.TestCase ): def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=1 / 255 , SCREAMING_SNAKE_CASE_=True , ) -> List[Any]: '''simple docstring''' UpperCamelCase__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = num_channels UpperCamelCase__ = min_resolution UpperCamelCase__ = max_resolution UpperCamelCase__ = do_resize UpperCamelCase__ = size UpperCamelCase__ = do_normalize UpperCamelCase__ = image_mean UpperCamelCase__ = image_std UpperCamelCase__ = do_rescale UpperCamelCase__ = rescale_factor UpperCamelCase__ = do_pad def _a (self ) -> List[str]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> str: '''simple docstring''' if not batched: UpperCamelCase__ = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE_ , Image.Image ): UpperCamelCase__ , UpperCamelCase__ = image.size else: UpperCamelCase__ , UpperCamelCase__ = image.shape[1], image.shape[2] if w < h: UpperCamelCase__ = int(self.size['''shortest_edge'''] * h / w ) UpperCamelCase__ = self.size['''shortest_edge'''] elif w > h: UpperCamelCase__ = self.size['''shortest_edge'''] UpperCamelCase__ = int(self.size['''shortest_edge'''] * w / h ) else: UpperCamelCase__ = self.size['''shortest_edge'''] UpperCamelCase__ = self.size['''shortest_edge'''] else: UpperCamelCase__ = [] for image in image_inputs: UpperCamelCase__ , UpperCamelCase__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase__ = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[0] )[0] UpperCamelCase__ = max(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _A ( __UpperCamelCase , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Optional[int] =DetaImageProcessor if is_vision_available() else None def _a (self ) -> Tuple: '''simple docstring''' UpperCamelCase__ = DetaImageProcessingTester(self ) @property def _a (self ) -> List[str]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _a (self ) -> Dict: '''simple docstring''' UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) ) 
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_rescale''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_pad''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''size''' ) ) def _a (self ) -> Dict: '''simple docstring''' UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE_ ) def _a (self ) -> List[Any]: '''simple docstring''' pass def _a (self ) -> List[str]: '''simple docstring''' UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a (self ) -> int: '''simple docstring''' UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a (self ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _a (self ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ = json.loads(f.read() ) UpperCamelCase__ = {'''image_id''': 3_9769, '''annotations''': target} # encode them UpperCamelCase__ = DetaImageProcessor() UpperCamelCase__ = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) # verify area UpperCamelCase__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE_ ) ) # verify boxes UpperCamelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) # verify image_id UpperCamelCase__ = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE_ ) ) # verify is_crowd UpperCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE_ ) ) # verify class_labels UpperCamelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE_ ) ) # verify orig_size UpperCamelCase__ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE_ ) ) # verify size UpperCamelCase__ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE_ ) ) @slow def _a (self ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ = json.loads(f.read() ) UpperCamelCase__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target} UpperCamelCase__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCamelCase__ = DetaImageProcessor(format='''coco_panoptic''' ) UpperCamelCase__ = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , masks_path=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ 
= torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) # verify area UpperCamelCase__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE_ ) ) # verify boxes UpperCamelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) # verify image_id UpperCamelCase__ = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE_ ) ) # verify is_crowd UpperCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE_ ) ) # verify class_labels UpperCamelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE_ ) ) # verify masks UpperCamelCase__ = 82_2873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , SCREAMING_SNAKE_CASE_ ) # verify orig_size UpperCamelCase__ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE_ ) ) # verify size UpperCamelCase__ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE_ ) )
415
0
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( __snake_case , unittest.TestCase ): a: str = DanceDiffusionPipeline a: Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS a: str = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } a: Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS a: int = False a: List[Any] = False def _A ( self: Optional[int] ): torch.manual_seed(0 ) _a = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__UpperCamelCase , use_timestep_embedding=__UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) _a = IPNDMScheduler() _a = { '''unet''': unet, '''scheduler''': scheduler, } return components def _A ( self: Any , __UpperCamelCase: List[Any] , __UpperCamelCase: Any=0 ): if str(__UpperCamelCase ).startswith('''mps''' ): _a = torch.manual_seed(__UpperCamelCase ) else: _a = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _a = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def _A ( self: Any ): _a = '''cpu''' # ensure determinism for the device-dependent torch.Generator _a = self.get_dummy_components() _a = DanceDiffusionPipeline(**__UpperCamelCase ) _a = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _a = self.get_dummy_inputs(__UpperCamelCase ) _a = pipe(**__UpperCamelCase ) _a = output.audios _a = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) _a = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _A ( self: int ): return super().test_save_load_local() @skip_mps def _A ( self: Union[str, Any] ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def _A ( self: Optional[Any] ): return super().test_save_load_optional_components() @skip_mps def _A ( self: Optional[int] ): return super().test_attention_slicing_forward_pass() def _A ( self: Optional[Any] ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): def _A ( self: Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A ( self: Dict ): _a = torch_device _a = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) _a = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _a = torch.manual_seed(0 ) _a = pipe(generator=__UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 ) _a = output.audios _a = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _a = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, 
-0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def _A ( self: Any ): _a = torch_device _a = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) _a = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _a = torch.manual_seed(0 ) _a = pipe(generator=__UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 ) _a = output.audios _a = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) _a = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
705
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the heavy modeling module so _LazyModule can expose it lazily.
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]


if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
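For orientation, a small sketch of what this lazy-import pattern buys; it assumes the standard behavior of transformers' `_LazyModule` (submodules are only imported on first attribute access), and the usage below is illustrative:

# Importing the package module is cheap: neither torch nor the modeling
# code is loaded at this point, because sys.modules holds a _LazyModule.
from transformers.models import roc_bert

# The heavy modeling submodule is imported lazily, on first attribute access.
model_cls = roc_bert.RoCBertModel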
346
0
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class __lowercase ( __lowerCamelCase ): def __lowercase ( self : int ,A : Dict=None ,A : Optional[Any]=None ,A : Any=None ,**A : Dict ): '''simple docstring''' if tokenize_kwargs is None: UpperCAmelCase__ : List[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) UpperCAmelCase__ : List[str] = truncation UpperCAmelCase__ : Union[str, Any] = tokenize_kwargs UpperCAmelCase__ : List[Any] = {} if return_tensors is not None: UpperCAmelCase__ : Union[str, Any] = return_tensors return preprocess_params, {}, postprocess_params def __lowercase ( self : Optional[int] ,A : str ,**A : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.framework UpperCAmelCase__ : Optional[int] = self.tokenizer(A ,return_tensors=A ,**A ) return model_inputs def __lowercase ( self : Optional[int] ,A : int ): '''simple docstring''' UpperCAmelCase__ : str = self.model(**A ) return model_outputs def __lowercase ( self : Union[str, Any] ,A : Dict ,A : Dict=False ): '''simple docstring''' # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Union[str, Any] ,*A : Tuple ,**A : Optional[int] ): '''simple docstring''' return super().__call__(*A ,**A )
65
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
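A short usage sketch for a config class like this one, assuming the usual transformers config/model pairing (the overridden values are illustrative):

from transformers import TimesformerConfig, TimesformerModel

# Override a few fields; anything not passed keeps the defaults set in __init__.
config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")

# Instantiate a randomly initialized model from the config.
model = TimesformerModel(config)
assert model.config.num_frames == 16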
56
0
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowerCamelCase__ = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + 
'''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + 
'''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, } class _lowerCAmelCase : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE = 14 ) -> None: """simple docstring""" if group not in primes: raise ValueError('''Unsupported Group''' ) snake_case__ : Union[str, Any] =primes[group]['''prime'''] snake_case__ : List[str] =primes[group]['''generator'''] snake_case__ : Tuple =int(hexlify(urandom(32 ) ) , base=16 ) def UpperCAmelCase ( self ) -> str: """simple docstring""" return hex(self.__private_key )[2:] def UpperCAmelCase ( self ) -> str: """simple docstring""" snake_case__ : Union[str, Any] =pow(self.generator , self.__private_key , self.prime ) return hex(__SCREAMING_SNAKE_CASE )[2:] def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return ( 2 <= key <= self.prime - 2 and pow(__SCREAMING_SNAKE_CASE , (self.prime - 1) // 2 , self.prime ) == 1 ) def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" snake_case__ : Any =int(__SCREAMING_SNAKE_CASE , base=16 ) if not self.is_valid_public_key(__SCREAMING_SNAKE_CASE ): raise ValueError('''Invalid public key''' ) snake_case__ : Optional[int] =pow(__SCREAMING_SNAKE_CASE , self.__private_key , self.prime ) return shaaaa(str(__SCREAMING_SNAKE_CASE ).encode() ).hexdigest() @staticmethod def UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return ( 2 <= remote_public_key_str <= prime - 2 and pow(__SCREAMING_SNAKE_CASE , (prime - 1) // 2 , __SCREAMING_SNAKE_CASE ) == 1 ) @staticmethod def UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 14 ) -> str: """simple docstring""" snake_case__ : Union[str, Any] =int(__SCREAMING_SNAKE_CASE , base=16 ) snake_case__ : int =int(__SCREAMING_SNAKE_CASE , base=16 ) snake_case__ : Dict =primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError('''Invalid public key''' ) snake_case__ : Any =pow(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return shaaaa(str(__SCREAMING_SNAKE_CASE ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
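As a quick illustration of the exchange the class above implements, here is a minimal, self-contained sketch of finite-field Diffie-Hellman using only the standard library. The parameters are deliberately tiny toys for readability; a real exchange would use one of the 1536-bit+ MODP primes defined above, and the variable names are illustrative:

from hashlib import sha256
from secrets import randbelow

# Toy parameters for illustration only -- never use numbers this small.
prime, generator = 23, 5

# Each party draws a private exponent in [2, p-2] and publishes g^x mod p.
alice_private = 2 + randbelow(prime - 3)
bob_private = 2 + randbelow(prime - 3)
alice_public = pow(generator, alice_private, prime)
bob_public = pow(generator, bob_private, prime)

# Both sides derive the same shared value g^(ab) mod p and hash it, mirroring
# the hash-the-shared-value step in the class above.
alice_shared = sha256(str(pow(bob_public, alice_private, prime)).encode()).hexdigest()
bob_shared = sha256(str(pow(alice_public, bob_private, prime)).encode()).hexdigest()
assert alice_shared == bob_shared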
717
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
408
0
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
81
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer _snake_case : Union[str, Any] = logging.getLogger(__name__) def lowerCAmelCase_ ( ): __snake_case : int = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=__lowerCamelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=__lowerCamelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=__lowerCamelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=__lowerCamelCase , default=1_0_0_0 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=__lowerCamelCase , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=__lowerCamelCase , type=__lowerCamelCase , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=__lowerCamelCase , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=__lowerCamelCase , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." , ) __snake_case : List[str] = parser.parse_args() return args def lowerCAmelCase_ ( __lowerCamelCase ): def fn(__lowerCamelCase ): return tokenizer(examples["text"] ) return fn def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Tuple = [] for i in range(len(tokenized_data["input_ids"] ) ): __snake_case : Tuple = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } __snake_case : List[Any] = tf.train.Features(feature=__lowerCamelCase ) __snake_case : str = tf.train.Example(features=__lowerCamelCase ) __snake_case : List[str] = example.SerializeToString() records.append(__lowerCamelCase ) return records def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Optional[int] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: __snake_case : Optional[Any] = min(len(__lowerCamelCase ) , args.limit ) __snake_case : Dict = dataset.select(range(__lowerCamelCase ) ) print(F'Limiting the dataset to {args.limit} entries.' ) __snake_case : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) __snake_case : Dict = os.path.join(args.output_dir , args.split ) if not os.path.exists(__lowerCamelCase ): os.makedirs(__lowerCamelCase ) else: __snake_case : str = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
__snake_case : Any = tokenize_function(__lowerCamelCase ) __snake_case : Optional[Any] = dataset.map(__lowerCamelCase , batched=__lowerCamelCase , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(__lowerCamelCase ): # Concatenate all texts. __snake_case : List[str] = {k: sum(examples[k] , [] ) for k in examples.keys()} __snake_case : List[Any] = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 __snake_case : Any = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. __snake_case : int = { k: [t[i : i + args.max_length] for i in range(0 , __lowerCamelCase , args.max_length )] for k, t in concatenated_examples.items() } return result __snake_case : Any = dataset_tokenized.map(__lowerCamelCase , batched=__lowerCamelCase , batch_size=1_0_0_0 , num_proc=4 ) __snake_case : Optional[Any] = 0 __snake_case : Optional[Any] = 0 for shard in range(0 , len(__lowerCamelCase ) , args.shard_size ): __snake_case : List[str] = grouped_dataset[shard : shard + args.shard_size] __snake_case : Any = len(dataset_snapshot["input_ids"] ) __snake_case : List[Any] = os.path.join(__lowerCamelCase , F'dataset-{shard_count}-{records_containing}.tfrecord' ) __snake_case : Optional[Any] = get_serialized_examples(__lowerCamelCase ) with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file: for i in range(len(__lowerCamelCase ) ): __snake_case : Union[str, Any] = serialized_examples[i] out_file.write(__lowerCamelCase ) print("Wrote file {} containing {} records".format(__lowerCamelCase , __lowerCamelCase ) ) shard_count += 1 total_records += records_containing with open(F'split-{args.split}-records-count.txt' , "w" ) as f: print(F'Total {args.split} records: {total_records}' , file=__lowerCamelCase ) if __name__ == "__main__": _snake_case : List[Any] = parse_args() main(args)
81
1
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def _lowerCAmelCase ( *__lowerCamelCase:str , __lowerCamelCase:Optional[Union[Dict, Any]] = None , __lowerCamelCase:List[Any]=True , __lowerCamelCase:str=2 ): '''simple docstring''' from .. import __version__ __magic_name__ = take_from __magic_name__ = () if not isinstance(args[0] , __lowerCamelCase ): __magic_name__ = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__lowerCamelCase ).base_version ) >= version.parse(__lowerCamelCase ): raise ValueError( f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'''' f''' version {__version__} is >= {version_name}''' ) __magic_name__ = None if isinstance(__lowerCamelCase , __lowerCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__lowerCamelCase ),) __magic_name__ = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.''' elif hasattr(__lowerCamelCase , __lowerCamelCase ): values += (getattr(__lowerCamelCase , __lowerCamelCase ),) __magic_name__ = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.''' elif deprecated_kwargs is None: __magic_name__ = f'''`{attribute}` is deprecated and will be removed in version {version_name}.''' if warning is not None: __magic_name__ = warning + " " if standard_warn else "" warnings.warn(warning + message , __lowerCamelCase , stacklevel=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) > 0: __magic_name__ = inspect.getouterframes(inspect.currentframe() )[1] __magic_name__ = call_frame.filename __magic_name__ = call_frame.lineno __magic_name__ = call_frame.function __magic_name__ , __magic_name__ = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' ) if len(__lowerCamelCase ) == 0: return elif len(__lowerCamelCase ) == 1: return values[0] return values
468
"""simple docstring""" import colorsys from PIL import Image # type: ignore def _lowerCAmelCase ( __lowerCamelCase:float , __lowerCamelCase:float , __lowerCamelCase:int ): '''simple docstring''' __magic_name__ = x __magic_name__ = y for step in range(__lowerCamelCase ): # noqa: B007 __magic_name__ = a * a - b * b + x __magic_name__ = 2 * a * b + y __magic_name__ = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def _lowerCAmelCase ( __lowerCamelCase:float ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (2_5_5, 2_5_5, 2_5_5) def _lowerCAmelCase ( __lowerCamelCase:float ): '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(__lowerCamelCase , 1 , 1 ) ) def _lowerCAmelCase ( __lowerCamelCase:int = 8_0_0 , __lowerCamelCase:int = 6_0_0 , __lowerCamelCase:float = -0.6 , __lowerCamelCase:float = 0 , __lowerCamelCase:float = 3.2 , __lowerCamelCase:int = 5_0 , __lowerCamelCase:bool = True , ): '''simple docstring''' __magic_name__ = Image.new("RGB" , (image_width, image_height) ) __magic_name__ = img.load() # loop through the image-coordinates for image_x in range(__lowerCamelCase ): for image_y in range(__lowerCamelCase ): # determine the figure-coordinates based on the image-coordinates __magic_name__ = figure_width / image_width * image_height __magic_name__ = figure_center_x + (image_x / image_width - 0.5) * figure_width __magic_name__ = figure_center_y + (image_y / image_height - 0.5) * figure_height __magic_name__ = get_distance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: __magic_name__ = get_color_coded_rgb(__lowerCamelCase ) else: __magic_name__ = get_black_and_white_rgb(__lowerCamelCase ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure lowercase = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
468
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
171
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler A__ : Union[str, Any] = 16 A__ : int = 32 def UpperCamelCase( __UpperCamelCase : Tuple ): return int(x / 2**20 ) class __snake_case : def __enter__( self : str): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowerCAmelCase_ : List[str] = torch.cuda.memory_allocated() return self def __exit__( self : Any , *A_ : Dict): gc.collect() torch.cuda.empty_cache() lowerCAmelCase_ : str = torch.cuda.memory_allocated() lowerCAmelCase_ : Optional[int] = torch.cuda.max_memory_allocated() lowerCAmelCase_ : List[str] = bamb(self.end - self.begin) lowerCAmelCase_ : Optional[int] = bamb(self.peak - self.begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def UpperCamelCase( __UpperCamelCase : Accelerator ,__UpperCamelCase : int = 16 ,__UpperCamelCase : str = "bert-base-cased" ,__UpperCamelCase : int = 320 ,__UpperCamelCase : int = 160 ,): lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__UpperCamelCase ) lowerCAmelCase_ : Any = load_dataset( '''glue''' ,'''mrpc''' ,split={'''train''': f"""train[:{n_train}]""", '''validation''': f"""validation[:{n_val}]"""} ) def tokenize_function(__UpperCamelCase : Any ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ : Union[str, Any] = datasets.map( __UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,load_from_cache_file=__UpperCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ : List[str] = tokenized_datasets.rename_column('''label''' ,'''labels''' ) def collate_fn(__UpperCamelCase : List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase ,padding='''max_length''' ,max_length=128 ,return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase ,padding='''longest''' ,return_tensors='''pt''' ) # Instantiate dataloaders. 
lowerCAmelCase_ : Union[str, Any] = DataLoader( tokenized_datasets['''train'''] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase ) lowerCAmelCase_ : str = DataLoader( tokenized_datasets['''validation'''] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase ) return train_dataloader, eval_dataloader def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ): # Initialize accelerator lowerCAmelCase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ : Any = config['''lr'''] lowerCAmelCase_ : Any = int(config['''num_epochs'''] ) lowerCAmelCase_ : Any = int(config['''seed'''] ) lowerCAmelCase_ : Dict = int(config['''batch_size'''] ) lowerCAmelCase_ : Dict = args.model_name_or_path set_seed(__UpperCamelCase ) lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,args.n_train ,args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ : Any = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase ) # Instantiate optimizer lowerCAmelCase_ : Any = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ : List[str] = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ : str = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: lowerCAmelCase_ : Tuple = 1 lowerCAmelCase_ : str = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,) else: lowerCAmelCase_ : List[Any] = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = accelerator.prepare( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ : str = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ : List[Any] = 0 # Now we train the model lowerCAmelCase_ : Union[str, Any] = {} for epoch in range(__UpperCamelCase ,__UpperCamelCase ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(__UpperCamelCase ): lowerCAmelCase_ : Union[str, Any] = model(**__UpperCamelCase ) lowerCAmelCase_ : Any = outputs.loss lowerCAmelCase_ : List[str] = loss / gradient_accumulation_steps accelerator.backward(__UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) ) accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) ) accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) ) accelerator.print( '''Total Peak Memory consumed during the train (max): {}'''.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowerCAmelCase_ : Tuple = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir ,'''peak_memory_utilization.json''' ) ,'''w''' ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ) def UpperCamelCase( ): lowerCAmelCase_ : str = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' ,type=__UpperCamelCase ,default='''bert-base-cased''' ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=__UpperCamelCase ,) parser.add_argument( '''--output_dir''' ,type=__UpperCamelCase ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,) parser.add_argument( '''--peak_memory_upper_bound''' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' ,) parser.add_argument( '''--n_train''' ,type=__UpperCamelCase ,default=320 ,help='''Number of training examples to use.''' ,) parser.add_argument( '''--n_val''' ,type=__UpperCamelCase ,default=160 ,help='''Number of validation examples to use.''' ,) parser.add_argument( '''--num_epochs''' ,type=__UpperCamelCase ,default=1 ,help='''Number of train epochs.''' ,) lowerCAmelCase_ : Dict = parser.parse_args() lowerCAmelCase_ : int = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(__UpperCamelCase ,__UpperCamelCase ) if __name__ == "__main__": main()
171
1
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
702
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowercase_ = 'bert-base-cased' lowercase_ = 'google/pegasus-xsum' lowercase_ = [' Sam ate lunch today.', 'Sams lunch ingredients.'] lowercase_ = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] lowercase_ = 'patrickvonplaten/t5-tiny-random' lowercase_ = 'sshleifer/bart-tiny-random' lowercase_ = 'sshleifer/tiny-mbart' lowercase_ = 'sshleifer/tiny-marian-en-de' def a ( A__ : Path , A__ : list ) -> Tuple: """simple docstring""" _lowercase ='\n'.join(A__ ) Path(A__ ).open('w' ).writelines(A__ ) def a ( A__ : str ) -> Union[str, Any]: """simple docstring""" for split in ["train", "val", "test"]: _dump_articles(os.path.join(A__ , F'''{split}.source''' ) , A__ ) _dump_articles(os.path.join(A__ , F'''{split}.target''' ) , A__ ) return tmp_dir class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def A__ ( self , lowerCAmelCase ) -> List[str]: '''simple docstring''' _lowercase =AutoTokenizer.from_pretrained(lowerCAmelCase ) _lowercase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _lowercase =max(len(tokenizer.encode(lowerCAmelCase ) ) for a in ARTICLES ) _lowercase =max(len(tokenizer.encode(lowerCAmelCase ) ) for a in SUMMARIES ) _lowercase =4 _lowercase =8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated _lowercase , _lowercase ='ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. _lowercase =SeqaSeqDataset( lowerCAmelCase , data_dir=lowerCAmelCase , type_path='train' , max_source_length=lowerCAmelCase , max_target_length=lowerCAmelCase , src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , ) _lowercase =DataLoader(lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowerCAmelCase , lowerCAmelCase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place _lowercase =shift_tokens_right(batch['labels'] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def A__ ( self , lowerCAmelCase ) -> str: '''simple docstring''' _lowercase =AutoTokenizer.from_pretrained(lowerCAmelCase ) _lowercase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) _lowercase =max(len(tokenizer.encode(lowerCAmelCase ) ) for a in ARTICLES ) _lowercase =max(len(tokenizer.encode(lowerCAmelCase ) ) for a in SUMMARIES ) _lowercase =4 _lowercase =LegacySeqaSeqDataset( lowerCAmelCase , data_dir=lowerCAmelCase , type_path='train' , max_source_length=20 , max_target_length=lowerCAmelCase , ) _lowercase =DataLoader(lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def A__ ( self ) -> List[str]: '''simple docstring''' _lowercase =AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) _lowercase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) _lowercase =tmp_dir.joinpath('train.source' ).open().readlines() _lowercase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowerCAmelCase , lowerCAmelCase , 128 , lowerCAmelCase ) _lowercase ={x.name for x in tmp_dir.iterdir()} _lowercase ={x.name for x in save_dir.iterdir()} _lowercase =save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowerCAmelCase ) < len(lowerCAmelCase ) assert len(lowerCAmelCase ) == 1 assert len(packed_examples[0] ) == sum(len(lowerCAmelCase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' ) def A__ ( self ) -> int: '''simple docstring''' if not FAIRSEQ_AVAILABLE: return _lowercase , _lowercase , _lowercase =self._get_dataset(max_len=64 ) _lowercase =64 _lowercase =ds.make_dynamic_sampler(lowerCAmelCase , required_batch_size_multiple=lowerCAmelCase ) _lowercase =[len(lowerCAmelCase ) for x in batch_sampler] assert len(set(lowerCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowerCAmelCase ) == len(lowerCAmelCase ) # no dropped or added examples _lowercase =DataLoader(lowerCAmelCase , batch_sampler=lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 ) _lowercase =[] _lowercase =[] for batch in data_loader: _lowercase =batch['input_ids'].shape _lowercase =src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple _lowercase 
=np.product(batch['input_ids'].shape ) num_src_per_batch.append(lowerCAmelCase ) if num_src_tokens > (max_tokens * 1.1): failures.append(lowerCAmelCase ) assert num_src_per_batch[0] == max(lowerCAmelCase ) if failures: raise AssertionError(F'''too many tokens in {len(lowerCAmelCase )} batches''' ) def A__ ( self ) -> List[str]: '''simple docstring''' _lowercase , _lowercase , _lowercase =self._get_dataset(max_len=512 ) _lowercase =2 _lowercase =ds.make_sortish_sampler(lowerCAmelCase , shuffle=lowerCAmelCase ) _lowercase =DataLoader(lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 ) _lowercase =DataLoader(lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCAmelCase ) _lowercase =tokenizer.pad_token_id def count_pad_tokens(lowerCAmelCase , lowerCAmelCase="input_ids" ): return [batch[k].eq(lowerCAmelCase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowerCAmelCase , k='labels' ) ) < sum(count_pad_tokens(lowerCAmelCase , k='labels' ) ) assert sum(count_pad_tokens(lowerCAmelCase ) ) < sum(count_pad_tokens(lowerCAmelCase ) ) assert len(lowerCAmelCase ) == len(lowerCAmelCase ) def A__ ( self , lowerCAmelCase=1_000 , lowerCAmelCase=128 ) -> Union[str, Any]: '''simple docstring''' if os.getenv('USE_REAL_DATA' , lowerCAmelCase ): _lowercase ='examples/seq2seq/wmt_en_ro' _lowercase =max_len * 2 * 64 if not Path(lowerCAmelCase ).joinpath('train.len' ).exists(): save_len_file(lowerCAmelCase , lowerCAmelCase ) else: _lowercase ='examples/seq2seq/test_data/wmt_en_ro' _lowercase =max_len * 4 save_len_file(lowerCAmelCase , lowerCAmelCase ) _lowercase =AutoTokenizer.from_pretrained(lowerCAmelCase ) _lowercase =SeqaSeqDataset( lowerCAmelCase , data_dir=lowerCAmelCase , type_path='train' , max_source_length=lowerCAmelCase , max_target_length=lowerCAmelCase , n_obs=lowerCAmelCase , ) return ds, max_tokens, tokenizer def A__ ( self ) -> int: '''simple docstring''' _lowercase , _lowercase , _lowercase =self._get_dataset() _lowercase =set(DistributedSortishSampler(lowerCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowerCAmelCase ) ) _lowercase =set(DistributedSortishSampler(lowerCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowerCAmelCase ) ) assert idsa.intersection(lowerCAmelCase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def A__ ( self , lowerCAmelCase ) -> Dict: '''simple docstring''' _lowercase =AutoTokenizer.from_pretrained(lowerCAmelCase , use_fast=lowerCAmelCase ) if tok_name == MBART_TINY: _lowercase =SeqaSeqDataset( lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , ) _lowercase =train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: _lowercase =SeqaSeqDataset( lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , ) _lowercase =train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowerCAmelCase ) == 1 if tok_name == BART_TINY else len(lowerCAmelCase ) == 0
380
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging

logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    """Configuration class for the M-CTC-T model."""

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
41
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of the given magnitude and angle into its x/y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment of the forces about their locations is (close to) zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
348
0
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum across the matrix columns, moving up, down and right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]
    rows = len(matrix)
    columns = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(columns)] for _ in range(rows)]
    # the first column's minimal path sums are the values themselves
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, columns):
        # step right from the previous column
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # relax downwards, then upwards, within the current column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
719
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( __A , __A , unittest.TestCase ): '''simple docstring''' _lowercase = StableDiffusionDiffEditPipeline _lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} _lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} _lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _lowercase = frozenset([] ) def __lowerCamelCase ( self ): torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : List[str] =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_zero=__UpperCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : List[str] =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : List[Any] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , ) SCREAMING_SNAKE_CASE_ : str =CLIPTextModel(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : int =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE_ : Tuple ={ 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): SCREAMING_SNAKE_CASE_ : Any =floats_tensor((1, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[int] =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) if str(__UpperCAmelCase ).startswith('mps' ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.manual_seed(__UpperCAmelCase ) else: SCREAMING_SNAKE_CASE_ : Any 
=torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[Any] ={ 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): SCREAMING_SNAKE_CASE_ : Dict =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' ) if str(__UpperCAmelCase ).startswith('mps' ): SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(__UpperCAmelCase ) else: SCREAMING_SNAKE_CASE_ : List[Any] =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] ={ 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): SCREAMING_SNAKE_CASE_ : str =floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE_ : List[Any] =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert('RGB' ) if str(__UpperCAmelCase ).startswith('mps' ): SCREAMING_SNAKE_CASE_ : Dict =torch.manual_seed(__UpperCAmelCase ) else: SCREAMING_SNAKE_CASE_ : Tuple =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] ={ 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def __lowerCamelCase ( self ): if not hasattr(self.pipeline_class , '_optional_components' ): return SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components() SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] =pipe(**__UpperCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] =self.pipeline_class.from_pretrained(__UpperCAmelCase ) pipe_loaded.to(__UpperCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase ) for optional_component in pipe._optional_components: self.assertTrue( getattr(__UpperCAmelCase , __UpperCAmelCase ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , ) SCREAMING_SNAKE_CASE_ : Tuple =self.get_dummy_inputs(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe_loaded(**__UpperCAmelCase )[0] SCREAMING_SNAKE_CASE_ : str =np.abs(output - output_loaded ).max() self.assertLess(__UpperCAmelCase , 1E-4 ) 
def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Optional[int] ='cpu' SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components() SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_dummy_mask_inputs(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] =mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) SCREAMING_SNAKE_CASE_ : str =np.array([0] * 9 ) SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCAmelCase , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : int ='cpu' SCREAMING_SNAKE_CASE_ : List[str] =self.get_dummy_components() SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Dict =self.get_dummy_inversion_inputs(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe.invert(**__UpperCAmelCase ).images SCREAMING_SNAKE_CASE_ : str =image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE_ : Tuple =np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) SCREAMING_SNAKE_CASE_ : int =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCAmelCase , 1E-3 ) def __lowerCamelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : int ='cpu' SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_dummy_components() SCREAMING_SNAKE_CASE_ : Dict ={'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE_ : str =DPMSolverMultistepScheduler(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =DPMSolverMultistepInverseScheduler(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] =self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[int] =self.get_dummy_inversion_inputs(__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] =pipe.invert(**__UpperCAmelCase ).images SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) SCREAMING_SNAKE_CASE_ : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCAmelCase , 1E-3 ) @require_torch_gpu @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def __lowerCamelCase ( cls ): SCREAMING_SNAKE_CASE_ : Tuple =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE_ : Any =raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE_ : Optional[int] =raw_image def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Optional[int] =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : Dict =StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , 
safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ : Dict =DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE_ : Optional[Any] =DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] ='a bowl of fruit' SCREAMING_SNAKE_CASE_ : Optional[int] ='a bowl of pears' SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask( image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , ) SCREAMING_SNAKE_CASE_ : Optional[int] =pipe.invert( prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase ).latents SCREAMING_SNAKE_CASE_ : List[Any] =pipe( prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0] SCREAMING_SNAKE_CASE_ : Optional[Any] =( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : List[str] =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_ : Optional[int] =StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE_ : Any =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : str ='a bowl of fruit' SCREAMING_SNAKE_CASE_ : str ='a bowl of pears' SCREAMING_SNAKE_CASE_ : int =pipe.generate_mask( image=self.raw_image , source_prompt=__UpperCAmelCase , target_prompt=__UpperCAmelCase , generator=__UpperCAmelCase , ) SCREAMING_SNAKE_CASE_ : Optional[Any] =pipe.invert( prompt=__UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCAmelCase , num_inference_steps=25 , ).latents SCREAMING_SNAKE_CASE_ : Union[str, Any] =pipe( prompt=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_latents=__UpperCAmelCase , generator=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0] SCREAMING_SNAKE_CASE_ : List[str] =( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
153
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
21
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
21
1
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Resize an image to (dst_width, dst_height) with nearest-neighbour interpolation."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
547
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity |A intersect B| / |A union B| of two collections.

    If `alternative_union` is True, the denominator is |A| + |B| instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
547
1
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of `source_data` into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalise each column to [0, 1]; weight 0 means lower is better, weight 1 means higher is better."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score every row of `source_data` by weighted percentual proximity and append the score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
637
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __lowercase ( lowercase_ ): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : int=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : int=True , UpperCamelCase_ : str=False , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=99 , UpperCamelCase_ : Any=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : Union[str, Any]=64 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int=None , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : str=4 , UpperCamelCase_ : List[str]=1 , ): """simple docstring""" __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_input_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope __A = q_groups __A = k_groups __A = v_groups __A = post_attention_groups __A = intermediate_groups __A = output_groups def lowerCAmelCase_ ( self : Dict ): """simple docstring""" __A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __A = ids_tensor([self.batch_size] , self.num_choices ) __A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Any ): """simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , 
output_groups=self.output_groups , ) def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ): """simple docstring""" __A = SqueezeBertModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , UpperCamelCase_ ) __A = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ): """simple docstring""" __A = SqueezeBertForMaskedLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ): """simple docstring""" __A = SqueezeBertForQuestionAnswering(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ): """simple docstring""" __A = self.num_labels __A = SqueezeBertForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): """simple docstring""" __A = self.num_labels __A = SqueezeBertForTokenClassification(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ): """simple docstring""" __A = self.num_choices __A = SqueezeBertForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __A = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : int ): """simple docstring""" 
__A = self.prepare_config_and_inputs() ((__A) , (__A) , (__A) , (__A) , (__A) , (__A)) = config_and_inputs __A = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowercase ( lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) SCREAMING_SNAKE_CASE = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" __A = SqueezeBertModelTester(self ) __A = ConfigTester(self , config_class=UpperCamelCase_ , dim=37 ) def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : str ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Any ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCamelCase_ ) @slow def lowerCAmelCase_ ( self : int ): """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = SqueezeBertModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_sentencepiece @require_tokenizers @require_torch class __lowercase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" __A = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) __A = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] ) __A = model(UpperCamelCase_ )[0] __A = torch.Size((1, 3) ) self.assertEqual(output.shape , UpperCamelCase_ ) __A = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-4 ) )
637
1
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self ): '''simple docstring''' __A =inspect.getfile(accelerate.test_utils ) __A =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) __A =os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) __A =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def __UpperCamelCase ( self ): '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices.''' ) __A =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase ( self ): '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices.''' ) __A =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(f'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase ( self ): '''simple docstring''' __A =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase ( self ): '''simple docstring''' print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) __A =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = Accelerator() _lowerCamelCase : List[str] = (accelerator.state.process_index + 2, 10) _lowerCamelCase : int = torch.randint(0, 10, shape).to(accelerator.device) _lowerCamelCase : List[str] = '''''' _lowerCamelCase : Union[str, Any] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." _lowerCamelCase : int = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." _lowerCamelCase : Any = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
706
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
516
0
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _snake_case ( lowerCamelCase ): """simple docstring""" def lowercase_ ( self ) -> Optional[Any]: """simple docstring""" _A = tempfile.mkdtemp() _A = 8 # DPR tok _A = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _A = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(a , exist_ok=a ) _A = os.path.join(a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok _A = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _A = dict(zip(a , range(len(a ) ) ) ) _A = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _A = {'''unk_token''': '''<unk>'''} _A = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(a , exist_ok=a ) _A = os.path.join(a , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(a , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(a ) ) def lowercase_ ( self ) -> DPRQuestionEncoderTokenizer: """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def lowercase_ ( self ) -> BartTokenizer: """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def lowercase_ ( self ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) @require_tokenizers def lowercase_ ( self ) -> Any: """simple docstring""" _A = os.path.join(self.tmpdirname , '''rag_tokenizer''' ) _A = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) _A = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(a ) rag_tokenizer.save_pretrained(a ) _A = RagTokenizer.from_pretrained(a , config=a ) self.assertIsInstance(new_rag_tokenizer.question_encoder , a ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) 
self.assertIsInstance(new_rag_tokenizer.generator , a ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def lowercase_ ( self ) -> Optional[int]: """simple docstring""" _A = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' ) _A = [ '''who got the first nobel prize in physics''', '''when is the next deadpool movie being released''', '''which mode is used for short wave broadcast service''', '''who is the owner of reading football club''', '''when is the next scandal episode coming out''', '''when is the last time the philadelphia won the superbowl''', '''what is the most current adobe flash player version''', '''how many episodes are there in dragon ball z''', '''what is the first step in the evolution of the eye''', '''where is gall bladder situated in human body''', '''what is the main mineral in lithium batteries''', '''who is the president of usa right now''', '''where do the greasers live in the outsiders''', '''panda is a national animal of which country''', '''what is the name of manchester united stadium''', ] _A = tokenizer(a ) self.assertIsNotNone(a ) @slow def lowercase_ ( self ) -> Optional[Any]: """simple docstring""" _A = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' ) _A = [ '''who got the first nobel prize in physics''', '''when is the next deadpool movie being released''', '''which mode is used for short wave broadcast service''', '''who is the owner of reading football club''', '''when is the next scandal episode coming out''', '''when is the last time the philadelphia won the superbowl''', '''what is the most current adobe flash player version''', '''how many episodes are there in dragon ball z''', '''what is the first step in the evolution of the eye''', '''where is gall bladder situated in human body''', '''what is the main mineral in lithium batteries''', '''who is the president of usa right now''', '''where do the greasers live in the outsiders''', '''panda is a national animal of which country''', '''what is the name of manchester united stadium''', ] _A = tokenizer(a ) self.assertIsNotNone(a )
317
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> Dict: """simple docstring""" if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=a , ) assert hasattr(self , '''env''' ) def lowercase_ ( self , a=1 ) -> Optional[int]: """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=a , instance_type=self.instance_type , debugger_hook_config=a , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , ) def lowercase_ ( self , a ) -> Any: """simple docstring""" TrainingJobAnalytics(a ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) def lowercase_ ( self ) -> Optional[int]: """simple docstring""" _A = self.create_estimator() # run training estimator.fit() # result dataframe _A = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _A = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) _A = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _A = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , a )
317
1
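The RAG tokenizer tests above exercise a wrapper around two tokenizers (a DPR question-encoder tokenizer and a BART generator tokenizer). A short sketch of the call pattern the slow tests verify, using the same public checkpoint:

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# By default the wrapper tokenizes input text with its question_encoder tokenizer.
batch = tokenizer(["who got the first nobel prize in physics"])
print(batch["input_ids"])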
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : def __init__( self , lowercase_ , lowercase_=13 , lowercase_=10 , lowercase_=3 , lowercase_=2 , lowercase_=2 , lowercase_=2 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=0.9 , lowercase_=None , ): snake_case_ : Union[str, Any] = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : int = image_size snake_case_ : Optional[int] = num_channels snake_case_ : int = patch_size snake_case_ : Any = tubelet_size snake_case_ : Tuple = num_frames snake_case_ : Union[str, Any] = is_training snake_case_ : Optional[Any] = use_labels snake_case_ : Tuple = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Optional[int] = intermediate_size snake_case_ : List[str] = hidden_act snake_case_ : Any = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : Dict = type_sequence_label_size snake_case_ : Tuple = initializer_range snake_case_ : Tuple = mask_ratio snake_case_ : List[str] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame snake_case_ : Optional[Any] = (image_size // patch_size) ** 2 snake_case_ : str = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos snake_case_ : List[str] = int(mask_ratio * self.seq_length) def snake_case__ ( self): snake_case_ : List[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]) snake_case_ : List[Any] = None if self.use_labels: snake_case_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size) snake_case_ : Dict = self.get_config() return config, pixel_values, labels def snake_case__ ( self): return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def snake_case__ ( self , lowercase_ , lowercase_ , lowercase_): snake_case_ : Optional[Any] = 
VideoMAEModel(config=_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() snake_case_ : Union[str, Any] = model(_lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def snake_case__ ( self , lowercase_ , lowercase_ , lowercase_): snake_case_ : Union[str, Any] = VideoMAEForPreTraining(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch snake_case_ : Dict = torch.ones((self.num_masks,)) snake_case_ : Optional[int] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) snake_case_ : Any = mask.expand(self.batch_size , -1).bool() snake_case_ : List[str] = model(_lowerCAmelCase , _lowerCAmelCase) # model only returns predictions for masked patches snake_case_ : Union[str, Any] = mask.sum().item() snake_case_ : Tuple = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels)) def snake_case__ ( self): snake_case_ : str = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs snake_case_ : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): UpperCAmelCase_ = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) UpperCAmelCase_ = ( {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification} if is_torch_available() else {} ) UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def snake_case__ ( self): snake_case_ : str = VideoMAEModelTester(self) snake_case_ : str = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37) def snake_case__ ( self , lowercase_ , lowercase_ , lowercase_=False): snake_case_ : List[str] = copy.deepcopy(_lowerCAmelCase) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch snake_case_ : List[str] = torch.ones((self.model_tester.num_masks,)) snake_case_ : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) snake_case_ : Tuple = mask.expand(self.model_tester.batch_size , -1).bool() snake_case_ : str = bool_masked_pos.to(_lowerCAmelCase) if return_labels: if model_class in [ *get_values(_lowerCAmelCase), ]: snake_case_ : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase) return inputs_dict def snake_case__ ( self): self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds") def snake_case__ ( self): pass def snake_case__ ( self): snake_case_ , snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(_lowerCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) snake_case_ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear)) def snake_case__ ( self): snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: snake_case_ : Any = model_class(_lowerCAmelCase) snake_case_ : Optional[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[int] = [*signature.parameters.keys()] snake_case_ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase) def snake_case__ ( self): snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase) def snake_case__ ( self): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase) @slow def snake_case__ ( self): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Union[str, Any] = VideoMAEModel.from_pretrained(_lowerCAmelCase) self.assertIsNotNone(_lowerCAmelCase) def snake_case__ ( self): if not self.has_attentions: pass else: snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Dict = True for model_class in self.all_model_classes: snake_case_ : Tuple = self.model_tester.seq_length - self.model_tester.num_masks snake_case_ : str = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) snake_case_ : Dict = True snake_case_ : Any = False snake_case_ : List[str] = True snake_case_ : Union[str, Any] = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() with torch.no_grad(): snake_case_ : Dict = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase)) snake_case_ : str = outputs.attentions self.assertEqual(len(_lowerCAmelCase) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : Dict = True snake_case_ : List[str] = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() with torch.no_grad(): snake_case_ : Any = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase)) snake_case_ : List[Any] = outputs.attentions self.assertEqual(len(_lowerCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) snake_case_ : Optional[Any] = len(_lowerCAmelCase) # Check attention is always last and order is fine snake_case_ : List[Any] = True snake_case_ : str = True snake_case_ : Optional[int] = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() with torch.no_grad(): snake_case_ : Any = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase)) self.assertEqual(out_len + 1 , len(_lowerCAmelCase)) snake_case_ : List[str] = outputs.attentions self.assertEqual(len(_lowerCAmelCase) , self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def snake_case__ ( self): def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_): snake_case_ : str = model_class(_lowerCAmelCase) model.to(_lowerCAmelCase) model.eval() with torch.no_grad(): snake_case_ : str = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase)) snake_case_ : List[Any] = outputs.hidden_states snake_case_ : Tuple = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(_lowerCAmelCase) , _lowerCAmelCase) snake_case_ : int = self.model_tester.seq_length - self.model_tester.num_masks snake_case_ : List[Any] = num_visible_patches 
if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) snake_case_ , snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Any = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.") def snake_case__ ( self): pass def UpperCamelCase_ ( ): """simple docstring""" snake_case_ : Union[str, Any] = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) snake_case_ : Dict = np.load(__SCREAMING_SNAKE_CASE ) return list(__SCREAMING_SNAKE_CASE ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase ): @cached_property def snake_case__ ( self): return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def snake_case__ ( self): snake_case_ : Tuple = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to( _lowerCAmelCase) snake_case_ : List[Any] = self.default_image_processor snake_case_ : Dict = prepare_video() snake_case_ : int = image_processor(_lowerCAmelCase , return_tensors="pt").to(_lowerCAmelCase) # forward pass with torch.no_grad(): snake_case_ : Optional[int] = model(**_lowerCAmelCase) # verify the logits snake_case_ : Tuple = torch.Size((1, 4_00)) self.assertEqual(outputs.logits.shape , _lowerCAmelCase) snake_case_ : Optional[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421]).to(_lowerCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4)) @slow def snake_case__ ( self): snake_case_ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(_lowerCAmelCase) snake_case_ : List[str] = self.default_image_processor snake_case_ : List[str] = prepare_video() snake_case_ : Tuple = image_processor(_lowerCAmelCase , return_tensors="pt").to(_lowerCAmelCase) # add boolean mask, indicating which patches to mask snake_case_ : Optional[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt") snake_case_ : str = torch.load(_lowerCAmelCase) # forward pass with torch.no_grad(): snake_case_ : Optional[int] = model(**_lowerCAmelCase) # verify the logits snake_case_ : Tuple = torch.Size([1, 14_08, 15_36]) snake_case_ : Dict = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=_lowerCAmelCase) self.assertEqual(outputs.logits.shape , _lowerCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4)) # verify the loss (`config.norm_pix_loss` = `True`) snake_case_ : List[str] = torch.tensor([0.5_142] , device=_lowerCAmelCase) self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1E-4)) # verify the loss (`config.norm_pix_loss` = `False`) snake_case_ : Any = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_lowerCAmelCase).to( _lowerCAmelCase) with torch.no_grad(): snake_case_ : Any = model(**_lowerCAmelCase) snake_case_ 
: List[str] = torch.tensor([0.6_469] , device=_lowerCAmelCase) self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1E-4))
709
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    # Rename the LM-head key so the checkpoint matches the transformers state dict.
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(checkpoint_path, pytorch_dump_folder_path)
92
0
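The VideoMAE tests above construct the boolean mask for `VideoMAEForPreTraining` by hand: one shared mask, repeated for every example in the batch. A standalone sketch of that construction (the sizes below are illustrative, chosen to match the integration test's 1568-token sequence with 1408 masked patches):

import torch

seq_length, num_masks, batch_size = 1568, 1408, 2  # illustrative sizes
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()  # same mask for every video
print(bool_masked_pos.shape)  # torch.Size([2, 1568])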
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
508
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
508
1
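A quick sanity check of the converter above; `energy_conversion` is the name used in the cleaned-up definition:

assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("watthour", "wattsecond", 1) == 3_600.0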
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
532
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
532
1
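A brute-force cross-check for the Project Euler solution above, feasible only when the optimal grid sides fit under the search limit; `brute_force` is a helper introduced here purely for illustration:

def brute_force(target: int, limit: int = 100) -> int:
    # Same objective as solution(), checked exhaustively over small grids.
    def tri(n: int) -> int:
        return n * (n + 1) // 2

    best_product, area = 0, 0
    for a in range(1, limit):
        for b in range(1, limit):
            product = tri(a) * tri(b)
            if abs(target - product) < abs(target - best_product):
                best_product, area = product, a * b
    return area

print(brute_force(2_000_000))  # expected to agree with solution() for this target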
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCamelCase = CodeGenTokenizer UpperCamelCase = CodeGenTokenizerFast UpperCamelCase = True UpperCamelCase = {'''add_prefix_space''': True} UpperCamelCase = False def lowercase__ ( self : int ) -> Any: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] UpperCAmelCase_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase_ = {"unk_token": "<unk>"} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_UpperCAmelCase ) ) def lowercase__ ( self : List[str] , **_UpperCAmelCase : Dict ) -> List[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowercase__ ( self : Any , **_UpperCAmelCase : int ) -> int: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowercase__ ( self : Dict , _UpperCAmelCase : Optional[int] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "lower newer" UpperCAmelCase_ = "lower newer" return input_text, output_text def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ = "lower newer" UpperCAmelCase_ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] UpperCAmelCase_ = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase_ = tokens + [tokenizer.unk_token] UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def lowercase__ ( self : List[Any] ) -> List[str]: '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) UpperCAmelCase_ = "lower newer" # Testing tokenization UpperCAmelCase_ = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) UpperCAmelCase_ = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids without special tokens UpperCAmelCase_ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) UpperCAmelCase_ = rust_tokenizer.encode(_UpperCAmelCase , 
add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids with special tokens UpperCAmelCase_ = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) UpperCAmelCase_ = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) UpperCAmelCase_ = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing the unknown token UpperCAmelCase_ = tokens + [rust_tokenizer.unk_token] UpperCAmelCase_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def lowercase__ ( self : str , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Union[str, Any] ) -> Dict: '''simple docstring''' pass def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Any=15 ) -> Union[str, Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # Simple input UpperCAmelCase_ = "This is a simple input" UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"] UpperCAmelCase_ = ("This is a simple input", "This is a pair") UpperCAmelCase_ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Simple input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Simple input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" , ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" ) # Pair input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" , ) def lowercase__ ( self : Tuple ) -> Any: '''simple docstring''' UpperCAmelCase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input UpperCAmelCase_ = "This is a simple input" UpperCAmelCase_ = ["This is a simple input looooooooong", "This is a simple input"] UpperCAmelCase_ = ("This is a simple input", "This is a pair") UpperCAmelCase_ = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] UpperCAmelCase_ = tokenizer.pad_token_id UpperCAmelCase_ = tokenizer(_UpperCAmelCase , padding="max_length" , max_length=30 , return_tensors="np" ) UpperCAmelCase_ = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="np" ) UpperCAmelCase_ = tokenizer(*_UpperCAmelCase , padding="max_length" , max_length=60 , return_tensors="np" ) UpperCAmelCase_ = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in 
out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = "$$$" UpperCAmelCase_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase ) UpperCAmelCase_ = "This is a simple input" UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"] UpperCAmelCase_ = tokenizer.bos_token_id UpperCAmelCase_ = tokenizer(_UpperCAmelCase ) UpperCAmelCase_ = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) UpperCAmelCase_ = tokenizer.decode(out_s.input_ids ) UpperCAmelCase_ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def lowercase__ ( self : List[str] ) -> int: '''simple docstring''' UpperCAmelCase_ = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" ) UpperCAmelCase_ = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#" UpperCAmelCase_ = "\nif len_a > len_b: result = a\nelse: result = b" UpperCAmelCase_ = tokenizer.encode(_UpperCAmelCase ) UpperCAmelCase_ = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"] UpperCAmelCase_ = tokenizer.decode(_UpperCAmelCase , truncate_before_pattern=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' pass
82
"""simple docstring""" import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _lowerCAmelCase : Union[str, Any] = threading.Lock() _lowerCAmelCase : Optional[logging.Handler] = None _lowerCAmelCase : Union[str, Any] = { """debug""": logging.DEBUG, """info""": logging.INFO, """warning""": logging.WARNING, """error""": logging.ERROR, """critical""": logging.CRITICAL, } _lowerCAmelCase : Dict = logging.WARNING _lowerCAmelCase : Optional[Any] = True def SCREAMING_SNAKE_CASE__ ( )-> List[str]: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = os.getenv("TRANSFORMERS_VERBOSITY" , snake_case ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ' f'has to be one of: { ", ".join(log_levels.keys() ) }' ) return _default_log_level def SCREAMING_SNAKE_CASE__ ( )-> str: '''simple docstring''' return __name__.split("." )[0] def SCREAMING_SNAKE_CASE__ ( )-> logging.Logger: '''simple docstring''' return logging.getLogger(_get_library_name() ) def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return UpperCAmelCase__ : Any = logging.StreamHandler() # Set sys.stderr as stream. UpperCAmelCase__ : Union[str, Any] = sys.stderr.flush # Apply our default configuration to the library root logger. UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) UpperCAmelCase__ : int = False def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' global _default_handler with _lock: if not _default_handler: return UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) UpperCAmelCase__ : Optional[int] = None def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' return log_levels def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None )-> logging.Logger: '''simple docstring''' if name is None: UpperCAmelCase__ : Tuple = _get_library_name() _configure_library_root_logger() return logging.getLogger(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> int: '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> None: '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' return set_verbosity(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> List[str]: '''simple docstring''' return set_verbosity(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> Tuple: '''simple docstring''' return set_verbosity(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> str: '''simple docstring''' return set_verbosity(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' 
_configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def SCREAMING_SNAKE_CASE__ ( snake_case : logging.Handler )-> None: '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : logging.Handler )-> None: '''simple docstring''' _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' _configure_library_root_logger() UpperCAmelCase__ : Dict = False def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' _configure_library_root_logger() UpperCAmelCase__ : List[Any] = True def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger().handlers for handler in handlers: UpperCAmelCase__ : Union[str, Any] = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) handler.setFormatter(snake_case ) def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' UpperCAmelCase__ : Any = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case : List[str] , **snake_case : str )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , snake_case ) if no_advisory_warnings: return self.warning(*snake_case , **snake_case ) _lowerCAmelCase : int = warning_advice @functools.lru_cache(snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *snake_case : Dict , **snake_case : Any )-> Any: '''simple docstring''' self.warning(*snake_case , **snake_case ) _lowerCAmelCase : Tuple = warning_once class lowerCAmelCase__ : def __init__( self : List[str] , *snake_case__ : Any , **snake_case__ : List[str] ): # pylint: disable=unused-argument '''simple docstring''' UpperCAmelCase__ : List[Any] = args[0] if args else None def __iter__( self : Any ): '''simple docstring''' return iter(self._iterator ) def __getattr__( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' def empty_fn(*snake_case__ : Dict , **snake_case__ : str ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Optional[Any] ): '''simple docstring''' return self def __exit__( self : Tuple , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Optional[int] ): '''simple docstring''' return class lowerCAmelCase__ : def __call__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ): '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*snake_case__ , **snake_case__ ) else: return EmptyTqdm(*snake_case__ , **snake_case__ ) def __a ( self : Dict , *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() _lowerCAmelCase : Optional[int] = _tqdm_cls() def SCREAMING_SNAKE_CASE__ ( )-> bool: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' global _tqdm_active UpperCAmelCase__ : int = True hf_hub_utils.enable_progress_bars() def 
SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' global _tqdm_active UpperCAmelCase__ : Optional[Any] = False hf_hub_utils.disable_progress_bars()
438
0
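The CodeGen test above checks `truncate_before_pattern`, which cuts decoded text at the first match of any of the supplied regexes. The same behaviour outside the test harness, using the checkpoint the test loads:

from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
ids = tokenizer.encode(text)
# Decoding stops just before the first match of "^#" or a triple newline.
print(tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))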
"""simple docstring""" from sklearn.metrics import mean_squared_error import datasets SCREAMING_SNAKE_CASE_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' SCREAMING_SNAKE_CASE_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' SCREAMING_SNAKE_CASE_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. ])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def __A ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def __A ( self ) -> Dict: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float" ) ), "references": datasets.Sequence(datasets.Value("float" ) ), } else: return { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } def __A ( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_="uniform_average" , snake_case_=True ) -> Dict: _UpperCAmelCase = mean_squared_error( snake_case_ , snake_case_ , sample_weight=snake_case_ , multioutput=snake_case_ , squared=snake_case_ ) return {"mse": mse}
579
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class a ( _SCREAMING_SNAKE_CASE ): """simple docstring""" A__ : int = "luke" def __init__( self , snake_case_=50267 , snake_case_=500000 , snake_case_=768 , snake_case_=256 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1e-1_2 , snake_case_=True , snake_case_=None , snake_case_=1 , snake_case_=0 , snake_case_=2 , **snake_case_ , ) -> Any: super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) _UpperCAmelCase = vocab_size _UpperCAmelCase = entity_vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = entity_emb_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_entity_aware_attention _UpperCAmelCase = classifier_dropout
579
1
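The metric above is a thin wrapper over scikit-learn, so the numbers in its docstring can be reproduced directly:

from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]
print(mean_squared_error(references, predictions))                  # 0.375
print(mean_squared_error(references, predictions, squared=False))  # 0.6123724356957945 (RMSE)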
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _SCREAMING_SNAKE_CASE (): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join lowerCAmelCase = '__test_patch_submodule_mock__' with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _SCREAMING_SNAKE_CASE (): assert _test_patching.open is open lowerCAmelCase = '__test_patch_submodule_builtin_mock__' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , 'open' , _UpperCAmelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _SCREAMING_SNAKE_CASE (): # pandas.read_csv is not present in _test_patching lowerCAmelCase = '__test_patch_submodule_missing_mock__' with patch_submodule(_test_patching , 'pandas.read_csv' , _UpperCAmelCase ): pass def _SCREAMING_SNAKE_CASE (): # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point lowerCAmelCase = '__test_patch_submodule_missing_builtin_mock__' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , 'len' , _UpperCAmelCase ) is None with patch_submodule(_test_patching , 'len' , _UpperCAmelCase ): assert _test_patching.len is mock assert _test_patching.len is len def _SCREAMING_SNAKE_CASE (): 
lowerCAmelCase = '__test_patch_submodule_start_and_stop_mock__' lowerCAmelCase = patch_submodule(_test_patching , 'open' , _UpperCAmelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def _SCREAMING_SNAKE_CASE (): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join lowerCAmelCase = '__test_patch_submodule_successive_join__' lowerCAmelCase = '__test_patch_submodule_successive_dirname__' lowerCAmelCase = '__test_patch_submodule_successive_rename__' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , 'os.rename' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.join' , _UpperCAmelCase ): with patch_submodule(_test_patching , 'os.path.dirname' , _UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = '__test_patch_submodule_doesnt_exist_mock__' with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , _UpperCAmelCase ): pass with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , _UpperCAmelCase ): pass
4
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a (possibly quantized) tensor of `module` to `device`, optionally replacing its value."""
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix
            # prior to quantization. Since weights are saved in the correct "orientation", we skip transposing
            # when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively replace `nn.Linear`/`Conv1D` submodules with their bitsandbytes counterparts."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use "
        "`set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    """Return the module names (e.g. tied or output heads) that should be kept in full precision."""
    # Create a copy of the model and tie the weights, then check if it contains tied weights.
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # Otherwise the model has an attached head: keep the last module as well.
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # Add the last module together with the tied weights.
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # Remove ".weight" and ".bias" from the keys.
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
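# --- Usage sketch (not part of the module above) ---------------------------
# A minimal, hedged example of how these helpers are normally driven: users
# do not call them directly but pass a `BitsAndBytesConfig` to
# `from_pretrained`, which swaps eligible `nn.Linear`/`Conv1D` modules via
# `replace_with_bnb_linear` and loads the weights through
# `set_module_quantized_tensor_to_device`. Assumes a CUDA machine with
# `bitsandbytes` installed; the checkpoint name is an illustrative choice.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m", quantization_config=quantization_config, device_map="auto"
)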
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _UpperCAmelCase : List[str] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" _UpperCAmelCase : int = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" _UpperCAmelCase : Optional[int] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __lowerCAmelCase ( datasets.Metric): def SCREAMING_SNAKE_CASE ( self: Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: List[List[List[str]]] , _lowerCAmelCase: List[List[str]] , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 4 , ): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_lowerCAmelCase , hypotheses=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase ) }
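# --- Usage sketch (not part of the metric above) ----------------------------
# The metric is a thin wrapper around NLTK's `gleu_score.corpus_gleu`; the
# call below is equivalent to `google_bleu.compute(...)`. The toy sentences
# are made up for illustration.
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]

score = gleu_score.corpus_gleu(
    list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
)
print(round(score, 2))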
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
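# --- Illustrative sketch (not the transformers implementation) --------------
# The `_LazyModule` indirection above defers importing heavy submodules (and
# hence torch) until an exported name is first accessed. A stripped-down
# version of the same pattern looks roughly like this:
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until an exported name is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so `__getattr__` is not hit again
        return value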
import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py a__ = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. a__ = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. a__ = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") a__ = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. a__ = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) a__ = [ ("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""), ("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""), ("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""), ("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""), ("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""), ("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""), ("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""), ("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""), ("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""), ( """zero-shot-object-detection""", """MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForZeroShotObjectDetection""", ), ("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""), ("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""), ("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""), ("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""), ( """table-question-answering""", """MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForTableQuestionAnswering""", ), ("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""), ("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""), ( """next-sentence-prediction""", """MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""", """AutoModelForNextSentencePrediction""", ), ( """audio-frame-classification""", """MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioFrameClassification""", ), ("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""), ( """document-question-answering""", """MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForDocumentQuestionAnswering""", ), ( """visual-question-answering""", """MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForVisualQuestionAnswering""", ), 
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""), ( """zero-shot-image-classification""", """MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForZeroShotImageClassification""", ), ("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""), ("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""), ("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""), ] def _UpperCAmelCase ( a : Tuple ): snake_case__ = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , a ) return [m.group(0 ) for m in matches] def _UpperCAmelCase ( ): snake_case__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES snake_case__ = { config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. snake_case__ = collections.defaultdict(a ) snake_case__ = collections.defaultdict(a ) snake_case__ = collections.defaultdict(a ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(a ): snake_case__ = None if _re_tf_models.match(a ) is not None: snake_case__ = tf_models snake_case__ = _re_tf_models.match(a ).groups()[0] elif _re_flax_models.match(a ) is not None: snake_case__ = flax_models snake_case__ = _re_flax_models.match(a ).groups()[0] elif _re_pt_models.match(a ) is not None: snake_case__ = pt_models snake_case__ = _re_pt_models.match(a ).groups()[0] if lookup_dict is not None: while len(a ) > 0: if attr_name in model_prefix_to_model_type: snake_case__ = True break # Try again after removing the last word in the name snake_case__ = """""".join(camel_case_split(a )[:-1] ) snake_case__ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) snake_case__ = list(a ) all_models.sort() snake_case__ = {"""model_type""": all_models} snake_case__ = [pt_models[t] for t in all_models] snake_case__ = [tf_models[t] for t in all_models] snake_case__ = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure snake_case__ = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: snake_case__ = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: snake_case__ = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: snake_case__ = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
snake_case__ = """AutoTokenizer""" snake_case__ = [processors[t] for t in all_models] return pd.DataFrame(a ) def _UpperCAmelCase ( a : List[str] ): snake_case__ = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: snake_case__ = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}'''] snake_case__ = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}'''] # Loop through all three frameworks for module, cls, mapping in zip(a , a , a ): # The type of pipeline may not exist in this framework if not hasattr(a , a ): continue # First extract all model_names snake_case__ = [] for name in getattr(a , a ).values(): if isinstance(a , a ): model_names.append(a ) else: model_names.extend(list(a ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def _UpperCAmelCase ( a : Union[str, Any] , a : Optional[int] ): snake_case__ = get_frameworks_table() snake_case__ = Dataset.from_pandas(a ) snake_case__ = hf_hub_download( """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=a ) snake_case__ = Dataset.from_json(a ) snake_case__ = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(a ) ) } snake_case__ = update_pipeline_and_auto_class_table(a ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. snake_case__ = sorted(table.keys() ) snake_case__ = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) snake_case__ = Dataset.from_pandas(a ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(a , """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(a , """pipeline_tags.json""" ) ) if commit_sha is not None: snake_case__ = ( F'''Update with commit {commit_sha}\n\nSee: ''' F'''https://github.com/huggingface/transformers/commit/{commit_sha}''' ) else: snake_case__ = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""" , folder_path=a , repo_type="""dataset""" , token=a , commit_message=a , ) def _UpperCAmelCase ( ): snake_case__ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} snake_case__ = transformers_module.pipelines.SUPPORTED_TASKS snake_case__ = [] for key in pipeline_tasks: if key not in in_table: snake_case__ = pipeline_tasks[key]["""pt"""] if isinstance(a , (list, tuple) ): snake_case__ = model[0] snake_case__ = model.__name__ if model not in in_table.values(): missing.append(a ) if len(a ) > 0: snake_case__ = """, """.join(a ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ F'''`utils/update_metadata.py`: {msg}. 
Please add them!''' ) if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""") parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""") parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""") a__ = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
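# --- Usage sketch ------------------------------------------------------------
# Hypothetical invocations of the script above; the token and sha are
# placeholders (the flags themselves are defined by the argparse block):
#
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only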
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def _UpperCAmelCase ( a : str ): if "model" in orig_key: snake_case__ = orig_key.replace("""model.""" , """""" ) if "norm1" in orig_key: snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" ) if "norm2" in orig_key: snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" ) if "norm" in orig_key: snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" ) if "transformer" in orig_key: snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1] snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" ) if "mha" in orig_key: snake_case__ = orig_key.replace("""mha""" , """attention""" ) if "W_q" in orig_key: snake_case__ = orig_key.replace("""W_q""" , """self.query""" ) if "W_k" in orig_key: snake_case__ = orig_key.replace("""W_k""" , """self.key""" ) if "W_v" in orig_key: snake_case__ = orig_key.replace("""W_v""" , """self.value""" ) if "ff1" in orig_key: snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" ) if "ff2" in orig_key: snake_case__ = orig_key.replace("""ff2""" , """output.dense""" ) if "ff" in orig_key: snake_case__ = orig_key.replace("""ff""" , """output.dense""" ) if "mlm_class" in orig_key: snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" ) if "mlm" in orig_key: snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" ) if "cls" not in orig_key: snake_case__ = """yoso.""" + orig_key return orig_key def _UpperCAmelCase ( a : Tuple , a : Dict ): for key in orig_state_dict.copy().keys(): snake_case__ = orig_state_dict.pop(a ) if ("pooler" in key) or ("sen_class" in key): continue else: snake_case__ = val snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""] snake_case__ = torch.arange(a ).expand((1, -1) ) + 2 return orig_state_dict def _UpperCAmelCase ( a : int , a : List[Any] , a : List[Any] ): snake_case__ = torch.load(a , map_location="""cpu""" )["""model_state_dict"""] snake_case__ = YosoConfig.from_json_file(a ) snake_case__ = YosoForMaskedLM(a ) snake_case__ = convert_checkpoint_helper(config.max_position_embeddings , a ) print(model.load_state_dict(a ) ) model.eval() model.save_pretrained(a ) print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for YOSO model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a__ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
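# --- Usage sketch ------------------------------------------------------------
# A hypothetical end-to-end flow for the conversion script above; the script
# filename and all paths are placeholders, not real files:
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path ./yoso_original.bin \
#       --config_file ./yoso_config.json \
#       --pytorch_dump_path ./yoso-hf
#
# Afterwards the converted checkpoint should load through the regular API:
from transformers import YosoForMaskedLM

model = YosoForMaskedLM.from_pretrained("./yoso-hf")  # placeholder path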
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration __UpperCAmelCase = 500_000 __UpperCAmelCase , __UpperCAmelCase = os.path.split(__file__) __UpperCAmelCase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def _snake_case ( A , **A ) -> int: lowerCAmelCase__ = dataset.map(**A ) @get_duration def _snake_case ( A , **A ) -> Union[str, Any]: lowerCAmelCase__ = dataset.filter(**A ) def _snake_case ( ) -> Any: lowerCAmelCase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) lowerCAmelCase__ = generate_example_dataset( os.path.join(A , '''dataset.arrow''' ) , A , num_examples=A ) lowerCAmelCase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=A ) def tokenize(A ): return tokenizer(examples['''text'''] ) lowerCAmelCase__ = map(A ) lowerCAmelCase__ = map(A , batched=A ) lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''numpy''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''pandas''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) lowerCAmelCase__ = map(A , function=A , batched=A ) lowerCAmelCase__ = filter(A ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(A , '''wb''' ) as f: f.write(json.dumps(A ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
'''simple docstring''' from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def _snake_case ( A , A ) -> List[Any]: lowerCAmelCase__ = [] for part_id in partition_order: lowerCAmelCase__ = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Tuple: lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase__ = spark.range(100 ).repartition(1 ) lowerCAmelCase__ = Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Optional[int]: lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase__ = spark.range(10 ).repartition(2 ) lowerCAmelCase__ = [1, 0] lowerCAmelCase__ = _generate_iterable_examples(A , A ) # Reverse the partitions. lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Optional[Any]: lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase__ = spark.range(10 ).repartition(1 ) lowerCAmelCase__ = SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == F"""0_{i}""" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Union[str, Any]: lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase__ = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('''numpy.random.Generator''' ) as generator_mock: lowerCAmelCase__ = lambda A : x.reverse() lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowerCAmelCase__ = SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Dict: lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase__ = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowerCAmelCase__ = SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowerCAmelCase__ = SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _snake_case ( ) -> Dict: lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() lowerCAmelCase__ = spark.range(100 ).repartition(1 ) lowerCAmelCase__ = Spark(A ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
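# --- Usage sketch (not part of the module above) -----------------------------
# A minimal example of the default collator re-exported above; the toy
# features are made up for illustration.
import torch
from transformers.data import default_data_collator

features = [
    {"input_ids": [101, 7592, 102], "labels": 0},
    {"input_ids": [101, 2088, 102], "labels": 1},
]
batch = default_data_collator(features)
assert batch["input_ids"].shape == torch.Size([2, 3])
assert batch["labels"].tolist() == [0, 1]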
from math import pi, sqrt


def gamma(num: float) -> float:
    """Compute Gamma(num) for a positive integer or half-integer `num`."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
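# --- Worked example (illustrative; assumes the definitions above are in scope)
# The recursion relies on gamma(n) = (n - 1) * gamma(n - 1), so e.g.
# gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi), which is approximately 3.3234.
from math import isclose

assert isclose(gamma(3.5), 2.5 * 1.5 * 0.5 * sqrt(pi))
assert isclose(gamma(5), 24.0)  # gamma(n) = (n - 1)! for positive integers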
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
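# --- Usage sketch (illustrative) ----------------------------------------------
# Instantiating the config with defaults and one overridden field, then
# building a randomly initialised model from it:
from transformers import VisualBertConfig, VisualBertModel

config = VisualBertConfig(visual_embedding_dim=512)
model = VisualBertModel(config)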
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]=1_3 , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[Any]=9_9 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : Dict=1_6 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Optional[int]=4 , ) -> List[Any]: __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_attention_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_choices def lowercase ( self : List[str] ) -> Optional[int]: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_attention_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowercase ( self : Dict ) -> Dict: __lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowercase ( self : Tuple ) -> int: 
__lowerCAmelCase = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs __lowerCAmelCase = True __lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ): """simple docstring""" a_ = True a_ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowercase ( self : Any ) -> Dict: __lowerCAmelCase = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowercase ( self : Tuple ) -> List[str]: for model_class_name in self.all_model_classes: __lowerCAmelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCAmelCase_ ) __lowerCAmelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase_ ) @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowercase ( self : int ) -> int: __lowerCAmelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCAmelCase_ ) __lowerCAmelCase = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) __lowerCAmelCase = model(lowerCAmelCase_ )[0] __lowerCAmelCase = [1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , lowerCAmelCase_ ) # compare the actual values for a slice. __lowerCAmelCase = np.array( [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) ) @slow def lowercase ( self : Union[str, Any] ) -> Tuple: __lowerCAmelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowerCAmelCase_ ) __lowerCAmelCase = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) __lowerCAmelCase = model(lowerCAmelCase_ )[0] # compare the actual values for a slice. __lowerCAmelCase = np.array( [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
"""simple docstring""" from math import ceil, sqrt def UpperCamelCase (SCREAMING_SNAKE_CASE = 100_0000 ): UpperCamelCase : int = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: UpperCamelCase : Optional[Any] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: UpperCamelCase : str = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'''{solution() = }''')
import os from collections import deque import torch from torch.utils.data import Dataset class __A( a ): def __init__( self , _snake_case="" , _snake_case="train" ) -> Union[str, Any]: '''simple docstring''' assert os.path.isdir(_snake_case ) __a = [] __a = os.listdir(_snake_case ) for story_filename in story_filenames_list: if "summary" in story_filename: continue __a = os.path.join(_snake_case , _snake_case ) if not os.path.isfile(_snake_case ): continue self.documents.append(_snake_case ) def __len__( self ) -> List[Any]: '''simple docstring''' return len(self.documents ) def __getitem__( self , _snake_case ) -> Optional[Any]: '''simple docstring''' __a = self.documents[idx] __a = document_path.split('''/''' )[-1] with open(_snake_case , encoding='''utf-8''' ) as source: __a = source.read() __a , __a = process_story(_snake_case ) return document_name, story_lines, summary_lines def __lowerCAmelCase ( a__ ) -> List[Any]: __a = list(filter(lambda a__ : len(a__ ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) ) # for some unknown reason some lines miss a period, add it __a = [_add_missing_period(a__ ) for line in nonempty_lines] # gather article lines __a = [] __a = deque(a__ ) while True: try: __a = lines.popleft() if element.startswith('''@highlight''' ): break story_lines.append(a__ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines __a = list(filter(lambda a__ : not t.startswith('''@highlight''' ) , a__ ) ) return story_lines, summary_lines def __lowerCAmelCase ( a__ ) -> Tuple: __a = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')'''] if line.startswith('''@highlight''' ): return line if line[-1] in END_TOKENS: return line return line + "." def __lowerCAmelCase ( a__ , a__ , a__ ) -> Optional[Any]: if len(a__ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(a__ )) ) return sequence def __lowerCAmelCase ( a__ , a__ ) -> Dict: __a = torch.ones_like(a__ ) __a = sequence == pad_token_id __a = 0 return mask def __lowerCAmelCase ( a__ , a__ , a__ ) -> List[Any]: __a = [tokenizer.encode(a__ ) for line in story_lines] __a = [token for sentence in story_lines_token_ids for token in sentence] __a = [tokenizer.encode(a__ ) for line in summary_lines] __a = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def __lowerCAmelCase ( a__ , a__ ) -> str: __a = [] for sequence in batch: __a = -1 __a = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(a__ ) return torch.tensor(a__ )
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
"""
Project Euler problem 20: find the sum of the digits in the number 100!.
"""


def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # remove the last digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
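# --- Worked example (illustrative; assumes the definitions above are in scope)
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert factorial(10) == 3_628_800
assert split_and_add(3_628_800) == 27
assert solution(10) == 27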
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A ( _a ): lowercase_ = (DDPMParallelScheduler,) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]: """simple docstring""" _a = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowerCAmelCase_ ) return config def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , ) def __lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5 def __lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = self.dummy_sample_deter + 0.1 _a = self.dummy_sample_deter - 0.1 _a = samplea.shape[0] _a = torch.stack([samplea, samplea, samplea] , dim=0 ) _a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ ) _a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _a = 
self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config(prediction_type='''v_prediction''' ) _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def __lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) _a = scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase_ ): if i == len(lowerCAmelCase_ ) - 1: _a = -1 else: _a = timesteps[i + 1] _a = scheduler.previous_timestep(lowerCAmelCase_ ) _a = prev_t.item() self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] _a = len(lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ): __SCREAMING_SNAKE_CASE = LDMTextToImagePipeline __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS - { '''negative_prompt''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', '''prompt_embeds''', } __SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''callback''', '''callback_steps''', } __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS __SCREAMING_SNAKE_CASE = False def A ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) __snake_case = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) __snake_case = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) __snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __snake_case = CLIPTextModel(UpperCamelCase__ ) __snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __snake_case = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def A ( self : Any , a_ : Optional[int] , a_ : Optional[Any]=0 ): """simple docstring""" if str(UpperCamelCase__ ).startswith("mps" ): __snake_case = torch.manual_seed(UpperCamelCase__ ) else: __snake_case = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) __snake_case = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def A ( self : Tuple ): """simple docstring""" __snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case = self.get_dummy_components() __snake_case = LDMTextToImagePipeline(**UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) __snake_case = self.get_dummy_inputs(UpperCamelCase__ ) __snake_case = pipe(**UpperCamelCase__ ).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) __snake_case = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Any ): 
"""simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : str , a_ : str , a_ : int=torch.floataa , a_ : Tuple=0 ): """simple docstring""" __snake_case = torch.manual_seed(UpperCamelCase__ ) __snake_case = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 32, 32) ) __snake_case = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) __snake_case = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def A ( self : Optional[Any] ): """simple docstring""" __snake_case = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) __snake_case = self.get_inputs(UpperCamelCase__ ) __snake_case = pipe(**UpperCamelCase__ ).images __snake_case = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) __snake_case = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] ) __snake_case = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def A ( self : Optional[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : Optional[int] , a_ : List[Any] , a_ : Optional[Any]=torch.floataa , a_ : int=0 ): """simple docstring""" __snake_case = torch.manual_seed(UpperCamelCase__ ) __snake_case = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 32, 32) ) __snake_case = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) __snake_case = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def A ( self : Any ): """simple docstring""" __snake_case = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) __snake_case = self.get_inputs(UpperCamelCase__ ) __snake_case = pipe(**UpperCamelCase__ ).images[0] __snake_case = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) __snake_case = np.abs(expected_image - image ).max() assert max_diff < 1e-3
711
'''simple docstring''' import math import sys import cva import numpy as np def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray: # For applying gaussian function for each element in matrix. __snake_case = math.sqrt(_UpperCAmelCase ) __snake_case = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray: __snake_case = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __snake_case = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _UpperCAmelCase ): for j in range(0 , _UpperCAmelCase ): __snake_case = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray: __snake_case = np.zeros(img.shape ) __snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase ) __snake_case , __snake_case = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2] __snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase ) __snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase ) __snake_case = val return imga def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple: __snake_case = args[1] if args[1:] else "../image_data/lena.jpg" __snake_case = float(args[2] ) if args[2:] else 1.0 __snake_case = float(args[3] ) if args[3:] else 1.0 if args[4:]: __snake_case = int(args[4] ) __snake_case = kernel_size + abs(kernel_size % 2 - 1 ) else: __snake_case = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": a , a , a , a : Tuple = parse_args(sys.argv) a : Tuple = cva.imread(filename, 0) cva.imshow('''input image''', img) a : Dict = img / 255 a : str = out.astype('''float32''') a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) a : Dict = out * 255 a : List[str] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
680
0
'''simple docstring''' from collections import deque from .hash_table import HashTable class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ): def __init__( self : int , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[int]: a_ : Tuple = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(__SCREAMING_SNAKE_CASE ) a_ : str = self.values[key] def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: return ( sum(self.charge_factor - len(__SCREAMING_SNAKE_CASE ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str]=None ) -> Optional[Any]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(__SCREAMING_SNAKE_CASE ) == 0 ): return key return super()._collision_resolution(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
466
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ): snake_case__ = "openai/whisper-base" snake_case__ = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) snake_case__ = "transcriber" snake_case__ = WhisperProcessor snake_case__ = WhisperForConditionalGeneration snake_case__ = ["audio"] snake_case__ = ["text"] def SCREAMING_SNAKE_CASE ( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> str: return self.pre_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_features def SCREAMING_SNAKE_CASE ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: return self.model.generate(inputs=__SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: return self.pre_processor.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )[0]
466
1
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __A ( a ): def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCamelCase =load_from_cache_file lowerCamelCase =file_format lowerCamelCase =Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def _snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCamelCase =None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
269
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ : Tuple =logging.get_logger(__name__) UpperCAmelCase__ : str =['''model.decoder.embed_positions.weights'''] def _lowercase ( _UpperCAmelCase ) -> List[Any]: if "emb" in name: lowerCamelCase =name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: lowerCamelCase =name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: lowerCamelCase =name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: lowerCamelCase =name.replace("""linear1""" , """fc1""" ) if "linear2" in name: lowerCamelCase =name.replace("""linear2""" , """fc2""" ) if "norm1" in name: lowerCamelCase =name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: lowerCamelCase =name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: lowerCamelCase =name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: lowerCamelCase =name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: lowerCamelCase =name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: lowerCamelCase =name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple[Dict, Dict]: lowerCamelCase =list(state_dict.keys() ) lowerCamelCase ={} for key in keys: lowerCamelCase =state_dict.pop(_UpperCAmelCase ) lowerCamelCase =rename_keys(_UpperCAmelCase ) if "in_proj_weight" in key: # split fused qkv proj lowerCamelCase =val[:hidden_size, :] lowerCamelCase =val[hidden_size : 2 * hidden_size, :] lowerCamelCase =val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowerCamelCase =val else: lowerCamelCase =val return state_dict, enc_dec_proj_state_dict def _lowercase ( _UpperCAmelCase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values lowerCamelCase =10_24 lowerCamelCase =24 lowerCamelCase =16 elif checkpoint == "medium": lowerCamelCase =15_36 lowerCamelCase =48 lowerCamelCase =24 elif checkpoint == "large": lowerCamelCase =20_48 lowerCamelCase =48 lowerCamelCase =32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) lowerCamelCase =MusicgenDecoderConfig( hidden_size=_UpperCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , ) return config @torch.no_grad() def _lowercase ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ) -> Dict: lowerCamelCase =MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase ) lowerCamelCase =decoder_config_from_checkpoint(_UpperCAmelCase ) lowerCamelCase =fairseq_model.lm.state_dict() lowerCamelCase , lowerCamelCase =rename_state_dict( _UpperCAmelCase , hidden_size=decoder_config.hidden_size ) lowerCamelCase =TaEncoderModel.from_pretrained("""t5-base""" ) lowerCamelCase =EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) lowerCamelCase 
=MusicgenForCausalLM(_UpperCAmelCase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowerCamelCase , lowerCamelCase =decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(_UpperCAmelCase ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model lowerCamelCase =MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase ) # check we can do a forward pass lowerCamelCase =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowerCamelCase =input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowerCamelCase =model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits if logits.shape != (8, 1, 20_48): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor lowerCamelCase =AutoTokenizer.from_pretrained("""t5-base""" ) lowerCamelCase =AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) lowerCamelCase =MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # set the appropriate bos/pad token ids lowerCamelCase =20_48 lowerCamelCase =20_48 # set other default generation config params lowerCamelCase =int(30 * audio_encoder.config.frame_rate ) lowerCamelCase =True lowerCamelCase =3.0 if pytorch_dump_folder is not None: Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(_UpperCAmelCase ) processor.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) UpperCAmelCase__ : Optional[Any] =parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
269
1
def solution ( pence : int = 2_00 ) -> int: coins = [1, 2, 5, 10, 20, 50, 1_00, 2_00] number_of_ways = [0] * (pence + 1) number_of_ways[0] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(coin , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 73_682
31
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A : Optional[Any] = { """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Any = [ """MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegatronBertForCausalLM""", """MegatronBertForMaskedLM""", """MegatronBertForMultipleChoice""", """MegatronBertForNextSentencePrediction""", """MegatronBertForPreTraining""", """MegatronBertForQuestionAnswering""", """MegatronBertForSequenceClassification""", """MegatronBertForTokenClassification""", """MegatronBertModel""", """MegatronBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys A : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
349
0
'''simple docstring''' def solution ( n : int = 60_08_51_47_51_43 ): """simple docstring""" try: n = int(n ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) i = 2 ans = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 ans = i while n % i == 0: n = n // i i += 1 return int(ans ) if __name__ == "__main__": print(F"""{solution() = }""")
719
'''simple docstring''' from statistics import mean, stdev def normalization ( data : list ,ndigits : int = 3 ): """simple docstring""" x_min = min(data ) x_max = max(data ) # normalize data return [round((x - x_min) / (x_max - x_min) ,ndigits ) for x in data] def standardization ( data : list ,ndigits : int = 3 ): """simple docstring""" mu = mean(data ) sigma = stdev(data ) # standardize data return [round((x - mu) / (sigma) ,ndigits ) for x in data]
163
0
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self : List[Any] ): UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) UpperCAmelCase = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house UpperCAmelCase = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase = torch.tensor( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase = model(a__ )['''last_hidden_state'''].detach() self.assertEqual(output.shape , a__ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , a__ , atol=1e-3 ) ) @slow def __snake_case ( self : Union[str, Any] ): UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) UpperCAmelCase = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] ) # The dog is cute and lives in the garden house UpperCAmelCase = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim UpperCAmelCase = torch.tensor( [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): UpperCAmelCase = model(a__ )['''last_hidden_state'''].detach() self.assertEqual(output.shape , a__ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , a__ , atol=1e-3 ) )
51
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : int = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _A ( __magic_name__): SCREAMING_SNAKE_CASE : int = '''gpt_neo''' SCREAMING_SNAKE_CASE : Tuple = ['''past_key_values'''] SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , _SCREAMING_SNAKE_CASE=5_0257 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE="gelu_new" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=5_0256 , _SCREAMING_SNAKE_CASE=5_0256 , **_SCREAMING_SNAKE_CASE , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = vocab_size SCREAMING_SNAKE_CASE_ : Any = max_position_embeddings SCREAMING_SNAKE_CASE_ : str = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_layers SCREAMING_SNAKE_CASE_ : List[str] = num_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE_ : Optional[int] = window_size SCREAMING_SNAKE_CASE_ : int = activation_function SCREAMING_SNAKE_CASE_ : Union[str, Any] = resid_dropout SCREAMING_SNAKE_CASE_ : Optional[Any] = embed_dropout SCREAMING_SNAKE_CASE_ : int = attention_dropout SCREAMING_SNAKE_CASE_ : int = classifier_dropout SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE_ : Dict = initializer_range SCREAMING_SNAKE_CASE_ : List[Any] = use_cache SCREAMING_SNAKE_CASE_ : List[Any] = bos_token_id SCREAMING_SNAKE_CASE_ : Union[str, Any] = eos_token_id SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_types SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.expand_attention_types_params(_SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, " f"`config.num_layers = {self.num_layers}`. " '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @staticmethod def UpperCAmelCase ( _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def A_ ( a , a , a , a ): """simple docstring""" import torch SCREAMING_SNAKE_CASE_ : List[Any] = input.size() SCREAMING_SNAKE_CASE_ : str = len(a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = shape[dimension] SCREAMING_SNAKE_CASE_ : List[Any] = torch.arange(0 , a , a ) SCREAMING_SNAKE_CASE_ : List[str] = torch.div(sizedim - size , a , rounding_mode='floor' ) + 1 SCREAMING_SNAKE_CASE_ : Tuple = torch.arange(a ) + low_indices[:min_length][:, None] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [slice(a )] * rank SCREAMING_SNAKE_CASE_ : int = indices SCREAMING_SNAKE_CASE_ : int = input[s] SCREAMING_SNAKE_CASE_ : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(a ) def A_ ( a , a ): """simple docstring""" import torch SCREAMING_SNAKE_CASE_ : str = torch.arange(1 , a ) SCREAMING_SNAKE_CASE_ : List[str] = torch.remainder(a , a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = remainders == 0 SCREAMING_SNAKE_CASE_ : Any = candidates[divisor_indices] SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.max(a ) return largest_divisor, torch.div(a , a , rounding_mode='floor' ) class _A ( __magic_name__): @property def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction='inputs' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'} else: SCREAMING_SNAKE_CASE_ : List[Any] = {0: 'batch', 1: 'sequence'} return common_inputs @property def UpperCAmelCase ( self ): """simple docstring""" return self._config.num_heads def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = super(_SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( _SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE_ : int = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE_ : Any = seqlen + 2 SCREAMING_SNAKE_CASE_ : Optional[int] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE_ : List[Any] = [ (torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE_ : Tuple = common_inputs['attention_mask'] if self.use_past: SCREAMING_SNAKE_CASE_ : Any = ordered_inputs['attention_mask'].dtype SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def UpperCAmelCase ( self ): """simple docstring""" return 13
511
0
import math def solution ( n : int = 1_00 ): '''simple docstring''' sum_of_squares = sum(i * i for i in range(1 , n + 1 ) ) square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F"""{solution() = }""")
702
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } lowerCamelCase : int = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } lowerCamelCase : Tuple = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowerCamelCase_ = bs[:] lowerCamelCase_ = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase ) cs.append(2**8 + n ) n += 1 lowerCamelCase_ = [chr(lowercase ) for n in cs] return dict(zip(lowercase , lowercase ) ) def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' lowerCamelCase_ = set() lowerCamelCase_ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase_ = char return pairs class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Union[str, Any]="replace" , A_ : Dict="<s>" , A_ : Optional[int]="</s>" , A_ : Optional[Any]="</s>" , A_ : Dict="<s>" , A_ : Dict="<unk>" , A_ : Any="<pad>" , A_ : Dict="<mask>" , A_ : Union[str, Any]=False , **A_ : List[str] , ) -> Tuple: """simple docstring""" lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , ) with open(A_ , encoding='utf-8' ) as vocab_handle: lowerCamelCase_ = json.load(A_ ) lowerCamelCase_ = {v: k for k, v in self.encoder.items()} lowerCamelCase_ = errors # how to handle errors in decoding lowerCamelCase_ = bytes_to_unicode() lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()} with open(A_ , encoding='utf-8' ) as merges_handle: lowerCamelCase_ = merges_handle.read().split('\n' )[1:-1] lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) ) lowerCamelCase_ = {} lowerCamelCase_ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def a__ ( self : Optional[Any] ) -> Dict: """simple docstring""" return len(self.encoder ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self : Tuple , A_ : Tuple ) -> Optional[Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = get_pairs(A_ ) if not pairs: return token while True: lowerCamelCase_ = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_ , lowerCamelCase_ = bigram lowerCamelCase_ = [] lowerCamelCase_ = 0 while i < len(A_ ): try: lowerCamelCase_ = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_ = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ = tuple(A_ ) lowerCamelCase_ = new_word if len(A_ ) == 1: break else: lowerCamelCase_ = get_pairs(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = word return word def a__ ( self : str , A_ : List[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] for token in re.findall(self.pat , A_ ): lowerCamelCase_ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) ) return bpe_tokens def a__ ( self : Tuple , A_ : str ) -> Optional[Any]: """simple docstring""" return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def a__ ( self : Tuple , A_ : Dict ) -> List[Any]: """simple docstring""" return self.decoder.get(A_ ) def a__ ( self : Optional[int] , A_ : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = ''.join(A_ ) lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def a__ ( self : Tuple , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ = os.path.join( A_ , 
(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' ) lowerCamelCase_ = 0 with open(A_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) lowerCamelCase_ = token_index writer.write(' '.join(A_ ) + '\n' ) index += 1 return vocab_file, merge_file def a__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def a__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self : str , A_ : Optional[Any] , A_ : Union[str, Any]=False , **A_ : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()): lowerCamelCase_ = ' ' + text return (text, kwargs) def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> Dict: """simple docstring""" return token_ids_a + [self.eos_token_id] def a__ ( self : Optional[int] , A_ : "Conversation" ) -> List[int]: """simple docstring""" lowerCamelCase_ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(A_ ) lowerCamelCase_ = ' '.join(A_ ) lowerCamelCase_ = self.encode(A_ ) if len(A_ ) > self.model_max_length: lowerCamelCase_ = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
651
0
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib snake_case = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } snake_case = logging.WARNING def snake_case ( ) -> Optional[int]: _snake_case = os.getenv('''DATASETS_VERBOSITY''' , lowerCAmelCase_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ', '.join(log_levels.keys() ) }""" ) return _default_log_level def snake_case ( ) -> str: return __name__.split('''.''' )[0] def snake_case ( ) -> logging.Logger: return logging.getLogger(_get_library_name() ) def snake_case ( ) -> None: # Apply our default configuration to the library root logger. _snake_case = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def snake_case ( ) -> None: _snake_case = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def snake_case ( lowerCAmelCase_ = None ) -> logging.Logger: if name is None: _snake_case = _get_library_name() return logging.getLogger(lowerCAmelCase_ ) def snake_case ( ) -> int: return _get_library_root_logger().getEffectiveLevel() def snake_case ( lowerCAmelCase_ ) -> None: _get_library_root_logger().setLevel(lowerCAmelCase_ ) def snake_case ( ) -> Tuple: return set_verbosity(lowerCAmelCase_ ) def snake_case ( ) -> str: return set_verbosity(lowerCAmelCase_ ) def snake_case ( ) -> Union[str, Any]: return set_verbosity(lowerCAmelCase_ ) def snake_case ( ) -> Any: return set_verbosity(lowerCAmelCase_ ) def snake_case ( ) -> None: _snake_case = False def snake_case ( ) -> None: _snake_case = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class UpperCAmelCase : def __init__( self : Optional[int] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[Any] ): # pylint: disable=unused-argument """simple docstring""" _snake_case = args[0] if args else None def __iter__( self : Dict ): """simple docstring""" return iter(self._iterator ) def __getattr__( self : Optional[int] , __lowerCamelCase : Tuple ): """simple docstring""" def empty_fn(*__lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : int ): """simple docstring""" return self def __exit__( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ): """simple docstring""" return snake_case = True class UpperCAmelCase : def __call__( self : Union[str, Any] , *__lowerCamelCase : Dict , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Union[str, Any] ): """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*__lowerCamelCase , **__lowerCamelCase ) else: return EmptyTqdm(*__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : Dict , *__lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ): """simple docstring""" _snake_case = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*__lowerCamelCase , **__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() 
snake_case = _tqdm_cls() def snake_case ( ) -> bool: global _tqdm_active return bool(_tqdm_active ) def snake_case ( ) -> Dict: global _tqdm_active _snake_case = True def snake_case ( ) -> List[Any]: global _tqdm_active _snake_case = False
103
"""simple docstring""" def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int: return 1 if input_a == input_a else 0 def snake_case ( ) -> None: assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
103
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class __A ( __snake_case ): UpperCamelCase :Any = 42 @flax_register_to_config class __A ( nn.Module , __snake_case , __snake_case ): UpperCamelCase :List[str] = 32 UpperCamelCase :Optional[int] = 4 UpperCamelCase :Optional[Any] = 4 UpperCamelCase :Optional[Any] = ( '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''', '''DownBlock2D''', ) UpperCamelCase :Optional[Any] = ('''UpBlock2D''', '''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''') UpperCamelCase :Optional[Any] = False UpperCamelCase :Union[str, Any] = (320, 640, 1280, 1280) UpperCamelCase :int = 2 UpperCamelCase :Optional[Any] = 8 UpperCamelCase :Optional[int] = None UpperCamelCase :Optional[int] = 1280 UpperCamelCase :List[str] = 0.0 UpperCamelCase :List[Any] = False UpperCamelCase :List[str] = jnp.floataa UpperCamelCase :int = True UpperCamelCase :List[Any] = 0 UpperCamelCase :List[Any] = False def _snake_case (self , __magic_name__ ): lowerCamelCase__ : Dict = (1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase__ : Optional[int] = jnp.zeros(_lowercase , dtype=jnp.floataa ) lowerCamelCase__ : List[str] = jnp.ones((1,) , dtype=jnp.intaa ) lowerCamelCase__ : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) lowerCamelCase__ : List[str] = jax.random.split(_lowercase ) lowerCamelCase__ : Tuple = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(_lowercase , _lowercase , _lowercase , _lowercase )["params"] def _snake_case (self ): lowerCamelCase__ : List[Any] = self.block_out_channels lowerCamelCase__ : List[Any] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
lowerCamelCase__ : Union[str, Any] = self.num_attention_heads or self.attention_head_dim # input lowerCamelCase__ : List[Any] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time lowerCamelCase__ : Optional[Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) lowerCamelCase__ : Optional[Any] = FlaxTimestepEmbedding(_lowercase , dtype=self.dtype ) lowerCamelCase__ : str = self.only_cross_attention if isinstance(_lowercase , _lowercase ): lowerCamelCase__ : Dict = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowercase , _lowercase ): lowerCamelCase__ : int = (num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase__ : Union[str, Any] = [] lowerCamelCase__ : Optional[Any] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase__ : List[Any] = output_channel lowerCamelCase__ : Dict = block_out_channels[i] lowerCamelCase__ : Dict = i == len(_lowercase ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase__ : Optional[Any] = FlaxCrossAttnDownBlockaD( in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: lowerCamelCase__ : Optional[Any] = FlaxDownBlockaD( in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_lowercase ) lowerCamelCase__ : Optional[int] = down_blocks # mid lowerCamelCase__ : Tuple = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up lowerCamelCase__ : Optional[int] = [] lowerCamelCase__ : Union[str, Any] = list(reversed(_lowercase ) ) lowerCamelCase__ : List[str] = list(reversed(_lowercase ) ) lowerCamelCase__ : Tuple = list(reversed(_lowercase ) ) lowerCamelCase__ : Dict = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): lowerCamelCase__ : Union[str, Any] = output_channel lowerCamelCase__ : Tuple = reversed_block_out_channels[i] lowerCamelCase__ : Optional[int] = reversed_block_out_channels[min(i + 1 , len(_lowercase ) - 1 )] lowerCamelCase__ : List[str] = i == len(_lowercase ) - 1 if up_block_type == "CrossAttnUpBlock2D": lowerCamelCase__ : List[Any] = FlaxCrossAttnUpBlockaD( in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: lowerCamelCase__ : Any = FlaxUpBlockaD( in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , 
dtype=self.dtype , ) up_blocks.append(_lowercase ) lowerCamelCase__ : Union[str, Any] = output_channel lowerCamelCase__ : int = up_blocks # out lowerCamelCase__ : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) lowerCamelCase__ : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__ = True , __magic_name__ = False , ): if not isinstance(_lowercase , jnp.ndarray ): lowerCamelCase__ : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase__ : Union[str, Any] = timesteps.astype(dtype=jnp.floataa ) lowerCamelCase__ : List[str] = jnp.expand_dims(_lowercase , 0 ) lowerCamelCase__ : Any = self.time_proj(_lowercase ) lowerCamelCase__ : Tuple = self.time_embedding(_lowercase ) # 2. pre-process lowerCamelCase__ : Optional[Any] = jnp.transpose(_lowercase , (0, 2, 3, 1) ) lowerCamelCase__ : List[str] = self.conv_in(_lowercase ) # 3. down lowerCamelCase__ : Dict = (sample,) for down_block in self.down_blocks: if isinstance(_lowercase , _lowercase ): lowerCamelCase__ : Tuple = down_block(_lowercase , _lowercase , _lowercase , deterministic=not train ) else: lowerCamelCase__ : Tuple = down_block(_lowercase , _lowercase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: lowerCamelCase__ : Dict = () for down_block_res_sample, down_block_additional_residual in zip( _lowercase , _lowercase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) lowerCamelCase__ : Dict = new_down_block_res_samples # 4. mid lowerCamelCase__ : Dict = self.mid_block(_lowercase , _lowercase , _lowercase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: lowerCamelCase__ : List[str] = down_block_res_samples[-(self.layers_per_block + 1) :] lowerCamelCase__ : Optional[Any] = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_lowercase , _lowercase ): lowerCamelCase__ : List[str] = up_block( _lowercase , temb=_lowercase , encoder_hidden_states=_lowercase , res_hidden_states_tuple=_lowercase , deterministic=not train , ) else: lowerCamelCase__ : Tuple = up_block(_lowercase , temb=_lowercase , res_hidden_states_tuple=_lowercase , deterministic=not train ) # 6. post-process lowerCamelCase__ : Optional[Any] = self.conv_norm_out(_lowercase ) lowerCamelCase__ : Optional[Any] = nn.silu(_lowercase ) lowerCamelCase__ : Any = self.conv_out(_lowercase ) lowerCamelCase__ : int = jnp.transpose(_lowercase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_lowercase )
719
from __future__ import annotations def all_construct (target : str , word_bank : list[str] | None = None ) ->list[list[str]]: '''simple docstring''' word_bank = word_bank or [] # create a table table_size : int = len(target ) + 1 table : list[list[list[str]]] = [] for _ in range(table_size ): table.append([] ) # seed value table[0] = [[]] # because empty string has empty combination # iterate through the indices for i in range(len(target ) ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(word )] == word: new_combinations : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now, push that combination to the table[i+len(word)] table[i + len(word )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(target )]: combination.reverse() return table[len(target )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
96
0
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
557
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def __init__( self :Optional[int] , a :List[str] ) -> Union[str, Any]: __UpperCamelCase : Dict = parent def _lowerCamelCase ( self :Dict ) -> int: return {} def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase : Union[str, Any] = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>" __UpperCamelCase : Optional[int] = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n " return [html_string_a, html_string_a] @require_bsa class lowerCamelCase__ ( __lowercase , unittest.TestCase): '''simple docstring''' _A = MarkupLMFeatureExtractor if is_bsa_available() else None def _lowerCamelCase ( self :List[str] ) -> str: __UpperCamelCase : List[str] = MarkupLMFeatureExtractionTester(self ) @property def _lowerCamelCase ( self :List[Any] ) -> List[str]: return self.feature_extract_tester.prepare_feat_extract_dict() def _lowerCamelCase ( self :Dict ) -> Dict: # Initialize feature_extractor __UpperCamelCase : Tuple = self.feature_extraction_class() # Test not batched input __UpperCamelCase : Union[str, Any] = get_html_strings()[0] __UpperCamelCase : Any = feature_extractor(a ) # fmt: off __UpperCamelCase : Union[str, Any] = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]] __UpperCamelCase : Tuple = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]] # fmt: on self.assertEqual(encoding.nodes , a ) self.assertEqual(encoding.xpaths , a ) # Test batched __UpperCamelCase : str = get_html_strings() __UpperCamelCase : List[Any] = feature_extractor(a ) # fmt: off __UpperCamelCase : Optional[int] = expected_nodes + [["My First Heading", "My first paragraph."]] __UpperCamelCase : Tuple = expected_xpaths + [["/html/body/h1", "/html/body/p"]] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , a ) self.assertEqual(encoding.xpaths , a )
557
1
"""simple docstring""" from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): """simple docstring""" _a : Tuple = '''philschmid/bart-large-cnn-samsum''' _a : Optional[Any] = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) _a : Union[str, Any] = '''summarizer''' _a : List[Any] = AutoTokenizer _a : Optional[Any] = AutoModelForSeqaSeqLM _a : Any = ['''text'''] _a : List[str] = ['''text'''] def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]: return self.pre_processor(lowerCamelCase__ , return_tensors="""pt""" , truncation=lowerCamelCase__ ) def UpperCAmelCase__( self , lowerCamelCase__ ) -> Dict: return self.model.generate(**lowerCamelCase__ )[0] def UpperCAmelCase__( self , lowerCamelCase__ ) -> int: return self.pre_processor.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
717
"""simple docstring""" from functools import lru_cache @lru_cache def _lowerCamelCase ( lowerCamelCase__ : int ): if num < 0: raise ValueError("""Number should not be negative.""" ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
128
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available a__ : str = {'''tokenization_herbert''': ['''HerbertTokenizer''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = ['''HerbertTokenizerFast'''] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys a__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
368
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : str = logging.get_logger(__name__) a__ : Any = { '''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''', '''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''', '''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''', '''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''', '''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''', '''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''', '''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''', '''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''', '''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''', '''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''', } class __snake_case ( __magic_name__ ): __lowerCAmelCase = '''xlm''' __lowerCAmelCase = { '''hidden_size''': '''emb_dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', '''n_words''': '''vocab_size''', # For backward compatibility } def __init__( self , UpperCamelCase_=3_0145 , UpperCamelCase_=2048 , UpperCamelCase_=12 , UpperCamelCase_=16 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=1 , UpperCamelCase_=True , UpperCamelCase_=512 , UpperCamelCase_=2048**-0.5 , UpperCamelCase_=1E-1_2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=5 , UpperCamelCase_=True , UpperCamelCase_="first" , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=0.1 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=0 , **UpperCamelCase_ , ) -> List[str]: snake_case__ = vocab_size snake_case__ = emb_dim snake_case__ = n_layers snake_case__ = n_heads snake_case__ = dropout snake_case__ = attention_dropout snake_case__ = gelu_activation snake_case__ = sinusoidal_embeddings snake_case__ = causal snake_case__ = asm snake_case__ = n_langs snake_case__ = use_lang_emb snake_case__ = layer_norm_eps snake_case__ = bos_index snake_case__ = eos_index snake_case__ = pad_index snake_case__ = unk_index snake_case__ = mask_index snake_case__ = is_encoder snake_case__ = max_position_embeddings snake_case__ = embed_init_std snake_case__ = init_std snake_case__ = summary_type snake_case__ = summary_use_proj snake_case__ = summary_activation snake_case__ = summary_proj_to_labels snake_case__ = summary_first_dropout snake_case__ = start_n_top snake_case__ = end_n_top snake_case__ = mask_token_id snake_case__ = lang_id if "n_words" in kwargs: snake_case__ = kwargs['n_words'] super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) class __snake_case ( __magic_name__ ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == 
"multiple-choice": snake_case__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
368
1
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() snake_case__ : List[str] = logging.get_logger(__name__) def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]: _UpperCAmelCase =WavaVecaForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ ) _UpperCAmelCase =downstream_dict['projector.weight'] _UpperCAmelCase =downstream_dict['projector.bias'] _UpperCAmelCase =downstream_dict['model.post_net.linear.weight'] _UpperCAmelCase =downstream_dict['model.post_net.linear.bias'] return model def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]: _UpperCAmelCase =WavaVecaForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ ) _UpperCAmelCase =downstream_dict['model.linear.weight'] _UpperCAmelCase =downstream_dict['model.linear.bias'] return model def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[str]: _UpperCAmelCase =WavaVecaForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ ) _UpperCAmelCase =downstream_dict['connector.weight'] _UpperCAmelCase =downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _UpperCAmelCase =downstream_dict[ F"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] _UpperCAmelCase =downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"] _UpperCAmelCase =downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] _UpperCAmelCase =downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] _UpperCAmelCase =downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] _UpperCAmelCase =downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] _UpperCAmelCase =downstream_dict['objective.W'] return model @torch.no_grad() def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Dict: _UpperCAmelCase =torch.load(lowerCamelCase_ , map_location="cpu" ) _UpperCAmelCase =checkpoint['Downstream'] _UpperCAmelCase =WavaVecaConfig.from_pretrained(lowerCamelCase_ ) _UpperCAmelCase =WavaVecaFeatureExtractor.from_pretrained( lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ ) _UpperCAmelCase =hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): _UpperCAmelCase =convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) elif arch.endswith("ForAudioFrameClassification" ): _UpperCAmelCase =convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) elif arch.endswith("ForXVector" ): _UpperCAmelCase =convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" ) if hf_config.use_weighted_layer_sum: _UpperCAmelCase =checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(lowerCamelCase_ ) hf_model.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' 
) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') snake_case__ : Any = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
706
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position snake_case__ : List[Any] = '2.13.1' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('3.7'): raise ImportWarning( 'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( 'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n' 'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip snake_case__ : Optional[Any] = concatenate_datasets snake_case__ : str = DownloadConfig snake_case__ : Optional[int] = DownloadManager snake_case__ : List[Any] = DownloadMode snake_case__ : List[str] = DownloadConfig snake_case__ : List[str] = DownloadMode snake_case__ : List[Any] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
592
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase_ : Optional[int] = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
44
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=[] ) -> Union[str, Any]: __snake_case = size[0] - overlap_pixels * 2 __snake_case = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels __snake_case = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 __snake_case = np.pad(snake_case_ , mode='''linear_ramp''' , pad_width=snake_case_ , end_values=0 ) if "l" in remove_borders: __snake_case = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: __snake_case = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: __snake_case = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: __snake_case = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] ) -> str: return max(snake_case_ , min(snake_case_ , snake_case_ ) ) def lowerCamelCase__ ( snake_case_ : [int] , snake_case_ : [int] , snake_case_ : [int] ) -> Optional[Any]: return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def lowerCamelCase__ ( snake_case_ : [int] , snake_case_ : int , snake_case_ : [int] ) -> Tuple: __snake_case = list(snake_case_ ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap __snake_case = clamp_rect(snake_case_ , [0, 0] , [image_size[0], image_size[1]] ) return rect def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> str: __snake_case = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(snake_case_ , (original_slice, 0) ) return result def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : str ) -> Optional[Any]: __snake_case = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) __snake_case = tile.crop(snake_case_ ) return tile def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : int ) -> Optional[int]: __snake_case = n % d return n - divisor class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def __init__(self : Dict , a__ : AutoencoderKL , a__ : CLIPTextModel , a__ : CLIPTokenizer , a__ : UNetaDConditionModel , a__ : DDPMScheduler , a__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a__ : int = 350 , ): """simple docstring""" super().__init__( vae=a__ , text_encoder=a__ , tokenizer=a__ , unet=a__ , low_res_scheduler=a__ , scheduler=a__ , max_noise_level=a__ , ) def a (self : Tuple , a__ : str , a__ : int , a__ : Tuple , a__ : List[str] , a__ : Tuple , a__ : str , a__ : Dict , **a__ : List[str] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = ( min(image.size[0] - (tile_size + 
original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) __snake_case = add_overlap_rect(a__ , a__ , image.size ) __snake_case = image.crop(a__ ) __snake_case = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] __snake_case = translated_slice_x - (original_image_slice / 2) __snake_case = max(0 , a__ ) __snake_case = squeeze_tile(a__ , a__ , a__ , a__ ) __snake_case = to_input.size __snake_case = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) __snake_case = super(a__ , self ).__call__(image=a__ , **a__ ).images[0] __snake_case = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) __snake_case = unsqueeze_tile(a__ , a__ ) __snake_case = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) __snake_case = [] if x == 0: remove_borders.append('''l''' ) elif crop_rect[2] == image.size[0]: remove_borders.append('''r''' ) if y == 0: remove_borders.append('''t''' ) elif crop_rect[3] == image.size[1]: remove_borders.append('''b''' ) __snake_case = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=a__ ) , mode='''L''' , ) final_image.paste( a__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , a__ ) @torch.no_grad() def __call__(self : Any , a__ : Union[str, List[str]] , a__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , a__ : int = 75 , a__ : float = 9.0 , a__ : int = 50 , a__ : Optional[Union[str, List[str]]] = None , a__ : Optional[int] = 1 , a__ : float = 0.0 , a__ : Optional[torch.Generator] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a__ : int = 1 , a__ : int = 128 , a__ : int = 32 , a__ : int = 32 , ): """simple docstring""" __snake_case = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) ) __snake_case = math.ceil(image.size[0] / tile_size ) __snake_case = math.ceil(image.size[1] / tile_size ) __snake_case = tcx * tcy __snake_case = 0 for y in range(a__ ): for x in range(a__ ): self._process_tile( a__ , a__ , a__ , a__ , a__ , a__ , a__ , prompt=a__ , num_inference_steps=a__ , guidance_scale=a__ , noise_level=a__ , negative_prompt=a__ , num_images_per_prompt=a__ , eta=a__ , generator=a__ , latents=a__ , ) current_count += 1 if callback is not None: callback({'''progress''': current_count / total_tile_count, '''image''': final_image} ) return final_image def lowerCamelCase__ ( ) -> Tuple: # Run a demo __snake_case = '''stabilityai/stable-diffusion-x4-upscaler''' __snake_case = StableDiffusionTiledUpscalePipeline.from_pretrained(snake_case_ , revision='''fp16''' , torch_dtype=torch.floataa ) __snake_case = pipe.to('''cuda''' ) __snake_case = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' ) def callback(snake_case_ : Any ): print(f"""progress: {obj['progress']:.4f}""" ) obj["image"].save('''diffusers_library_progress.jpg''' ) __snake_case = pipe(image=snake_case_ , prompt='''Black font, white background, vector''' , noise_level=40 , callback=snake_case_ ) final_image.save('''diffusers_library.jpg''' ) if __name__ == "__main__": main()
592
0
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase_ ( ): raise RuntimeError("""CUDA out of memory.""" ) class lowerCAmelCase_ (nn.Module ): """simple docstring""" def __init__(self ) -> int: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ : Any = nn.Linear(3 , 4 ) SCREAMING_SNAKE_CASE__ : List[str] = nn.BatchNormad(4 ) SCREAMING_SNAKE_CASE__ : Any = nn.Linear(4 , 5 ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class lowerCAmelCase_ (unittest.TestCase ): """simple docstring""" def __magic_name__ (self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(SCREAMING_SNAKE_CASE__ ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__a , [1_28, 64, 32, 16, 8] ) def __magic_name__ (self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga SCREAMING_SNAKE_CASE__ : List[str] = mock_training_loop_function("""hello""" ) self.assertListEqual(__a , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def __magic_name__ (self ) -> Dict: """simple docstring""" @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(SCREAMING_SNAKE_CASE__ ): pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def __magic_name__ (self ) -> Any: """simple docstring""" @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(SCREAMING_SNAKE_CASE__ ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def __magic_name__ (self ) -> List[str]: """simple docstring""" @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__a ) as cm: mock_training_loop_function(1_28 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1=\'hello\', arg2=\'world\')""" , cm.exception.args[0] ) def __magic_name__ (self ) -> Optional[int]: """simple docstring""" @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(SCREAMING_SNAKE_CASE__ ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = torch.cuda.memory_allocated() SCREAMING_SNAKE_CASE__ : List[str] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __a ) 
SCREAMING_SNAKE_CASE__ : List[str] = release_memory(__a ) self.assertEqual(torch.cuda.memory_allocated() , __a )
717
"""simple docstring""" from math import pi, sqrt, tan def lowercase_ ( _snake_case ): if side_length < 0: raise ValueError("""surface_area_cube() only accepts non-negative values""" ) return 6 * side_length**2 def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): if length < 0 or breadth < 0 or height < 0: raise ValueError("""surface_area_cuboid() only accepts non-negative values""" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def lowercase_ ( _snake_case ): if radius < 0: raise ValueError("""surface_area_sphere() only accepts non-negative values""" ) return 4 * pi * radius**2 def lowercase_ ( _snake_case ): if radius < 0: raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" ) return 3 * pi * radius**2 def lowercase_ ( _snake_case ,_snake_case ): if radius < 0 or height < 0: raise ValueError("""surface_area_cone() only accepts non-negative values""" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( """surface_area_conical_frustum() only accepts non-negative values""" ) SCREAMING_SNAKE_CASE__ : int = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def lowercase_ ( _snake_case ,_snake_case ): if radius < 0 or height < 0: raise ValueError("""surface_area_cylinder() only accepts non-negative values""" ) return 2 * pi * radius * (height + radius) def lowercase_ ( _snake_case ,_snake_case ): if torus_radius < 0 or tube_radius < 0: raise ValueError("""surface_area_torus() only accepts non-negative values""" ) if torus_radius < tube_radius: raise ValueError( """surface_area_torus() does not support spindle or self intersecting tori""" ) return 4 * pow(_snake_case ,2 ) * torus_radius * tube_radius def lowercase_ ( _snake_case ,_snake_case ): if length < 0 or width < 0: raise ValueError("""area_rectangle() only accepts non-negative values""" ) return length * width def lowercase_ ( _snake_case ): if side_length < 0: raise ValueError("""area_square() only accepts non-negative values""" ) return side_length**2 def lowercase_ ( _snake_case ,_snake_case ): if base < 0 or height < 0: raise ValueError("""area_triangle() only accepts non-negative values""" ) return (base * height) / 2 def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("""Given three sides do not form a triangle""" ) SCREAMING_SNAKE_CASE__ : List[str] = (sidea + sidea + sidea) / 2 SCREAMING_SNAKE_CASE__ : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def lowercase_ ( _snake_case ,_snake_case ): if base < 0 or height < 0: raise ValueError("""area_parallelogram() only accepts non-negative values""" ) return base * height def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): if basea < 0 or basea < 0 or height < 0: raise ValueError("""area_trapezium() only accepts non-negative values""" ) return 1 / 2 * (basea + basea) * height def lowercase_ ( _snake_case ): if radius < 0: raise ValueError("""area_circle() only accepts non-negative values""" ) return pi * radius**2 def lowercase_ ( _snake_case ,_snake_case ): if radius_x < 0 or radius_y < 0: raise 
ValueError("""area_ellipse() only accepts non-negative values""" ) return pi * radius_x * radius_y def lowercase_ ( _snake_case ,_snake_case ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError("""area_rhombus() only accepts non-negative values""" ) return 1 / 2 * diagonal_a * diagonal_a def lowercase_ ( _snake_case ,_snake_case ): if not isinstance(_snake_case ,_snake_case ) or sides < 3: raise ValueError( """area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides""" ) elif length < 0: raise ValueError( """area_reg_polygon() only accepts non-negative values as \ length of a side""" ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""") print(f"""Square: {area_square(1_0) = }""") print(f"""Triangle: {area_triangle(1_0, 1_0) = }""") print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""") print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""") print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""") print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""") print(f"""Circle: {area_circle(2_0) = }""") print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""") print('\nSurface Areas of various geometric shapes: \n') print(f"""Cube: {surface_area_cube(2_0) = }""") print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""") print(f"""Sphere: {surface_area_sphere(2_0) = }""") print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""") print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""") print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""") print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""") print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""") print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""") print(f"""Square: {area_reg_polygon(4, 1_0) = }""") print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
545
0
from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging _lowercase = logging.get_logger(__name__) class __snake_case ( snake_case__ ): """simple docstring""" UpperCamelCase_ = ['input_features', 'attention_mask'] def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=80 ,lowerCAmelCase__ : Optional[Any]=1_60_00 ,lowerCAmelCase__ : List[str]=0.0 ,lowerCAmelCase__ : Tuple=10 ,lowerCAmelCase__ : Optional[Any]=25 ,lowerCAmelCase__ : Any="hamming_window" ,lowerCAmelCase__ : List[str]=32_768.0 ,lowerCAmelCase__ : Union[str, Any]=0.97 ,lowerCAmelCase__ : Any=1.0 ,lowerCAmelCase__ : str=True ,lowerCAmelCase__ : int=True ,lowerCAmelCase__ : Tuple=False ,**lowerCAmelCase__ : Optional[int] ,) -> Optional[Any]: '''simple docstring''' super().__init__(feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,**lowerCAmelCase__ ) lowerCAmelCase_ : Optional[int] = feature_size lowerCAmelCase_ : List[Any] = sampling_rate lowerCAmelCase_ : Union[str, Any] = padding_value lowerCAmelCase_ : str = hop_length lowerCAmelCase_ : str = win_length lowerCAmelCase_ : str = frame_signal_scale lowerCAmelCase_ : Any = preemphasis_coeff lowerCAmelCase_ : Optional[Any] = mel_floor lowerCAmelCase_ : List[str] = normalize_means lowerCAmelCase_ : Optional[Any] = normalize_vars lowerCAmelCase_ : Dict = win_function lowerCAmelCase_ : List[Any] = return_attention_mask lowerCAmelCase_ : Tuple = win_length * sampling_rate // 10_00 lowerCAmelCase_ : str = hop_length * sampling_rate // 10_00 lowerCAmelCase_ : Dict = optimal_fft_length(self.sample_size ) lowerCAmelCase_ : Optional[int] = (self.n_fft // 2) + 1 def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : np.array ) -> np.ndarray: '''simple docstring''' if self.win_function == "hamming_window": lowerCAmelCase_ : int = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=lowerCAmelCase__ ) else: lowerCAmelCase_ : Tuple = window_function(window_length=self.sample_size ,name=self.win_function ) lowerCAmelCase_ : List[str] = mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,) lowerCAmelCase_ : Any = spectrogram( one_waveform * self.frame_signal_scale ,window=lowerCAmelCase__ ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=lowerCAmelCase__ ,preemphasis=self.preemphasis_coeff ,mel_filters=lowerCAmelCase__ ,mel_floor=self.mel_floor ,log_mel="log" ,) return msfc_features.T def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Tuple ) -> Optional[Any]: '''simple docstring''' if self.normalize_means: lowerCAmelCase_ : Optional[int] = x[:input_length].mean(axis=0 ) lowerCAmelCase_ : List[str] = np.subtract(lowerCAmelCase__ ,lowerCAmelCase__ ) if self.normalize_vars: lowerCAmelCase_ : Optional[Any] = x[:input_length].std(axis=0 ) lowerCAmelCase_ : Tuple = np.divide(lowerCAmelCase__ ,lowerCAmelCase__ ) if input_length < x.shape[0]: lowerCAmelCase_ : int = padding_value # make sure array is in float32 lowerCAmelCase_ : Any = x.astype(np.floataa ) return x def UpperCAmelCase_ ( 
self : List[Any] ,lowerCAmelCase__ : List[np.ndarray] ,lowerCAmelCase__ : Optional[np.ndarray] = None ) -> List[np.ndarray]: '''simple docstring''' lowerCAmelCase_ : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(lowerCAmelCase__ ,lowerCAmelCase__ ,self.padding_value ) for x, n in zip(lowerCAmelCase__ ,lowerCAmelCase__ )] def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,lowerCAmelCase__ : Optional[int] = None ,**lowerCAmelCase__ : Union[str, Any] ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) lowerCAmelCase_ : List[Any] = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowerCAmelCase_ : str = is_batched_numpy or ( isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase_ : Tuple = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ): lowerCAmelCase_ : int = np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase_ : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase_ : Optional[int] = [raw_speech] # extract fbank features lowerCAmelCase_ : Dict = [self._extract_mfsc_features(lowerCAmelCase__ ) for one_waveform in raw_speech] # convert into correct format for padding lowerCAmelCase_ : int = BatchFeature({"input_features": features} ) lowerCAmelCase_ : Union[str, Any] = self.pad( lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,) # make sure list is in array format lowerCAmelCase_ : Optional[Any] = padded_inputs.get("input_features" ) if isinstance(input_features[0] ,lowerCAmelCase__ ): lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_features] lowerCAmelCase_ : List[Any] = padded_inputs.get("attention_mask" ) if attention_mask is not None: lowerCAmelCase_ : Dict = [np.asarray(lowerCAmelCase__ ,dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: lowerCAmelCase_ : Dict = ( np.array(lowerCAmelCase__ ,dtype=np.intaa ) if self._get_padding_strategies(lowerCAmelCase__ ,max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD 
and padding else None ) lowerCAmelCase_ : List[str] = self.normalize( padded_inputs["input_features"] ,attention_mask=lowerCAmelCase__ ) if return_tensors is not None: lowerCAmelCase_ : Dict = padded_inputs.convert_to_tensors(lowerCAmelCase__ ) return padded_inputs
659
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''): _lowercase = True from torch.cuda.amp import autocast _lowercase = logging.getLogger(__name__) @dataclass class __snake_case : """simple docstring""" UpperCamelCase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) UpperCamelCase_ = field( default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) UpperCamelCase_ = field( default=snake_case__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} ) UpperCamelCase_ = field( default=snake_case__ , metadata={'help': 'Whether to log verbose messages or not.'} , ) UpperCamelCase_ = field( default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} ) UpperCamelCase_ = field( default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} ) UpperCamelCase_ = field( default=0.99_99_95 , metadata={'help': 'Decay of gumbel temperature during training.'} ) def UpperCamelCase ( snake_case__ , snake_case__): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , ) lowerCAmelCase_ : str = logging.WARNING if model_args.verbose_logging: lowerCAmelCase_ : int = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank): lowerCAmelCase_ : Any = logging.INFO logger.setLevel(snake_case__) @dataclass class __snake_case : """simple docstring""" UpperCamelCase_ = field( default=snake_case__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) UpperCamelCase_ = field( default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) UpperCamelCase_ = field( default='train' , metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' } , ) UpperCamelCase_ = field( default='validation' , metadata={ 'help': ( 'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'' ) } , ) UpperCamelCase_ = field( default='file' , metadata={'help': 'Column in the dataset that contains speech file path. 
Defaults to \'file\''} , ) UpperCamelCase_ = field( default=snake_case__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) UpperCamelCase_ = field( default=1 , metadata={ 'help': 'The percentage of the train set used as validation set in case there\'s no validation split' } , ) UpperCamelCase_ = field( default=snake_case__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) UpperCamelCase_ = field( default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} ) @dataclass class __snake_case : """simple docstring""" UpperCamelCase_ = 42 UpperCamelCase_ = 42 UpperCamelCase_ = "longest" UpperCamelCase_ = None UpperCamelCase_ = None def __call__( self : str ,lowerCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: '''simple docstring''' lowerCAmelCase_ : Tuple = self.feature_extractor.pad( lowerCAmelCase__ ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="pt" ,) lowerCAmelCase_ : Union[str, Any] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] ) lowerCAmelCase_ : List[str] = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula lowerCAmelCase_ : Tuple = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to( torch.long ) lowerCAmelCase_ : Optional[Any] = torch.zeros( (batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to lowerCAmelCase_ : Tuple = 1 lowerCAmelCase_ : int = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices lowerCAmelCase_ : str = _compute_mask_indices( (batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=lowerCAmelCase__ ,min_masks=2 ,) return batch class __snake_case ( snake_case__ ): """simple docstring""" def __init__( self : List[str] ,*lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple=1 ,lowerCAmelCase__ : Optional[int]=0 ,lowerCAmelCase__ : Optional[Any]=1.0 ,**lowerCAmelCase__ : Any ) -> str: '''simple docstring''' super().__init__(*lowerCAmelCase__ ,**lowerCAmelCase__ ) lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : int = max_gumbel_temp lowerCAmelCase_ : Union[str, Any] = min_gumbel_temp lowerCAmelCase_ : str = gumbel_temp_decay def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : nn.Module ,lowerCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: '''simple docstring''' model.train() lowerCAmelCase_ : str = self._prepare_inputs(lowerCAmelCase__ ) if self.use_amp: with autocast(): lowerCAmelCase_ : List[Any] = self.compute_loss(lowerCAmelCase__ ,lowerCAmelCase__ ) else: lowerCAmelCase_ : List[Any] = self.compute_loss(lowerCAmelCase__ ,lowerCAmelCase__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": lowerCAmelCase_ : List[Any] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": lowerCAmelCase_ : Optional[Any] = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: lowerCAmelCase_ : int = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowerCAmelCase__ ).backward() elif self.use_apex: with amp.scale_loss(lowerCAmelCase__ ,self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowerCAmelCase__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) return loss.detach() def UpperCamelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCAmelCase_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = parser.parse_args_into_dataclasses() configure_logger(snake_case__ , snake_case__) # Downloading and loading a dataset from the hub. lowerCAmelCase_ : List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" lowerCAmelCase_ : Any = DatasetDict() lowerCAmelCase_ : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) lowerCAmelCase_ : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" lowerCAmelCase_ : Union[str, Any] = DatasetDict() lowerCAmelCase_ : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , ) lowerCAmelCase_ : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported lowerCAmelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=snake_case__) def prepare_dataset(snake_case__): # check that all files have the correct sampling rate lowerCAmelCase_ , lowerCAmelCase_ : str = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate) return batch # load audio files into numpy arrays lowerCAmelCase_ : int = datasets.map( snake_case__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names) # filter audio files that are too long lowerCAmelCase_ : int = vectorized_datasets.filter( lambda snake_case__: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)) def normalize(snake_case__): return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate) # normalize and transform to `BatchFeatures` lowerCAmelCase_ : str = vectorized_datasets.map( snake_case__ , 
batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 lowerCAmelCase_ : Optional[Any] = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'") lowerCAmelCase_ : Dict = WavaVecaForPreTraining(snake_case__) lowerCAmelCase_ : int = DataCollatorForWavaVecaPretraining(model=snake_case__ , feature_extractor=snake_case__) lowerCAmelCase_ : List[Any] = WavaVecaPreTrainer( model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=snake_case__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
659
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() a_ : List[str] = logging.get_logger(__name__) def UpperCAmelCase ( A__: int ) -> int: __lowerCamelCase : Any = DPTConfig() if "large" in checkpoint_url: __lowerCamelCase : Optional[Any] = 1024 __lowerCamelCase : Tuple = 4096 __lowerCamelCase : int = 24 __lowerCamelCase : str = 16 __lowerCamelCase : Optional[int] = [5, 11, 17, 23] __lowerCamelCase : List[str] = [256, 512, 1024, 1024] __lowerCamelCase : Optional[Any] = (1, 384, 384) if "ade" in checkpoint_url: __lowerCamelCase : List[str] = True __lowerCamelCase : Union[str, Any] = 150 __lowerCamelCase : Tuple = 'huggingface/label-files' __lowerCamelCase : Tuple = 'ade20k-id2label.json' __lowerCamelCase : Optional[int] = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type='dataset' ) ) , 'r' ) ) __lowerCamelCase : Tuple = {int(A__ ): v for k, v in idalabel.items()} __lowerCamelCase : List[Any] = idalabel __lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()} __lowerCamelCase : Optional[Any] = [1, 150, 480, 480] return config, expected_shape def UpperCAmelCase ( A__: str ) -> Tuple: __lowerCamelCase : Union[str, Any] = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(A__ , A__ ) def UpperCAmelCase ( A__: int ) -> Optional[int]: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): __lowerCamelCase : Optional[int] = name.replace('pretrained.model' , 'dpt.encoder' ) if "pretrained.model" in name: __lowerCamelCase : List[Any] = name.replace('pretrained.model' , 'dpt.embeddings' ) if "patch_embed" in name: __lowerCamelCase : Union[str, Any] = name.replace('patch_embed' , 'patch_embeddings' ) if "pos_embed" in name: __lowerCamelCase : Union[str, Any] = name.replace('pos_embed' , 'position_embeddings' ) if "attn.proj" in name: __lowerCamelCase : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense' ) if "proj" in name and "project" not in name: __lowerCamelCase : Tuple = name.replace('proj' , 'projection' ) if "blocks" in name: __lowerCamelCase : Tuple = name.replace('blocks' , 'layer' ) if "mlp.fc1" in name: __lowerCamelCase : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __lowerCamelCase : List[Any] = name.replace('mlp.fc2' , 'output.dense' ) if "norm1" in name: __lowerCamelCase : str = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __lowerCamelCase : int = name.replace('norm2' , 'layernorm_after' ) if "scratch.output_conv" in name: __lowerCamelCase : Any = name.replace('scratch.output_conv' , 'head' ) if "scratch" in name: __lowerCamelCase : Any = name.replace('scratch' , 'neck' ) if "layer1_rn" in name: __lowerCamelCase : str = name.replace('layer1_rn' , 'convs.0' ) if "layer2_rn" in name: __lowerCamelCase : List[Any] = name.replace('layer2_rn' , 'convs.1' ) if "layer3_rn" in name: __lowerCamelCase : Dict = name.replace('layer3_rn' , 'convs.2' ) if "layer4_rn" in name: __lowerCamelCase : List[Any] = name.replace('layer4_rn' , 'convs.3' ) if "refinenet" in name: __lowerCamelCase : Any = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] ) # tricky here: we need to map 4 to 
0, 3 to 1, 2 to 2 and 1 to 3 __lowerCamelCase : Any = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: __lowerCamelCase : Optional[Any] = name.replace('out_conv' , 'projection' ) if "resConfUnit1" in name: __lowerCamelCase : Dict = name.replace('resConfUnit1' , 'residual_layer1' ) if "resConfUnit2" in name: __lowerCamelCase : List[str] = name.replace('resConfUnit2' , 'residual_layer2' ) if "conv1" in name: __lowerCamelCase : Optional[Any] = name.replace('conv1' , 'convolution1' ) if "conv2" in name: __lowerCamelCase : Optional[Any] = name.replace('conv2' , 'convolution2' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: __lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' ) if "pretrained.act_postprocess2.0.project.0" in name: __lowerCamelCase : Dict = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' ) if "pretrained.act_postprocess3.0.project.0" in name: __lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' ) if "pretrained.act_postprocess4.0.project.0" in name: __lowerCamelCase : Optional[Any] = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' ) # resize blocks if "pretrained.act_postprocess1.3" in name: __lowerCamelCase : Dict = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' ) if "pretrained.act_postprocess1.4" in name: __lowerCamelCase : int = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' ) if "pretrained.act_postprocess2.3" in name: __lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' ) if "pretrained.act_postprocess2.4" in name: __lowerCamelCase : Union[str, Any] = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' ) if "pretrained.act_postprocess3.3" in name: __lowerCamelCase : int = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' ) if "pretrained.act_postprocess4.3" in name: __lowerCamelCase : str = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' ) if "pretrained.act_postprocess4.4" in name: __lowerCamelCase : Dict = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' ) if "pretrained" in name: __lowerCamelCase : Tuple = name.replace('pretrained' , 'dpt' ) if "bn" in name: __lowerCamelCase : Dict = name.replace('bn' , 'batch_norm' ) if "head" in name: __lowerCamelCase : Any = name.replace('head' , 'head.head' ) if "encoder.norm" in name: __lowerCamelCase : str = name.replace('encoder.norm' , 'layernorm' ) if "auxlayer" in name: __lowerCamelCase : List[Any] = name.replace('auxlayer' , 'auxiliary_head.head' ) return name def UpperCAmelCase ( A__: int , A__: List[Any] ) -> List[Any]: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase : List[str] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) __lowerCamelCase : List[Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase : Optional[int] = 
in_proj_weight[: config.hidden_size, :] __lowerCamelCase : int = in_proj_bias[: config.hidden_size] __lowerCamelCase : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase : str = in_proj_bias[-config.hidden_size :] def UpperCAmelCase ( ) -> Union[str, Any]: __lowerCamelCase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCamelCase : List[Any] = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCAmelCase ( A__: Dict , A__: Tuple , A__: Union[str, Any] , A__: List[Any] ) -> Any: __lowerCamelCase , __lowerCamelCase : Union[str, Any] = get_dpt_config(A__ ) # load original state_dict from URL __lowerCamelCase : Any = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' ) # remove certain keys remove_ignore_keys_(A__ ) # rename keys for key in state_dict.copy().keys(): __lowerCamelCase : str = state_dict.pop(A__ ) __lowerCamelCase : Any = val # read in qkv matrices read_in_q_k_v(A__ , A__ ) # load HuggingFace model __lowerCamelCase : List[Any] = DPTForSemanticSegmentation(A__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(A__ ) model.load_state_dict(A__ ) model.eval() # Check outputs on an image __lowerCamelCase : List[Any] = 480 if 'ade' in checkpoint_url else 384 __lowerCamelCase : Optional[int] = DPTImageProcessor(size=A__ ) __lowerCamelCase : List[str] = prepare_img() __lowerCamelCase : Any = image_processor(A__ , return_tensors='pt' ) # forward pass __lowerCamelCase : Optional[Any] = model(**A__ ).logits if 'ade' in checkpoint_url else model(**A__ ).predicted_depth # Assert logits __lowerCamelCase : Any = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] ) if "ade" in checkpoint_url: __lowerCamelCase : str = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] ) assert outputs.shape == torch.Size(A__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , A__ ) ) Path(A__ ).mkdir(exist_ok=A__ ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(A__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A__ ) if push_to_hub: print('Pushing model to hub...' 
) model.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=A__ , ) image_processor.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=A__ , ) if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) a_ : Optional[Any] = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
263
"""simple docstring""" import random from typing import Any def UpperCAmelCase ( A__: list ) -> list[Any]: for _ in range(len(A__ ) ): __lowerCamelCase : List[Any] = random.randint(0 , len(A__ ) - 1 ) __lowerCamelCase : Optional[Any] = random.randint(0 , len(A__ ) - 1 ) __lowerCamelCase , __lowerCamelCase : Any = data[b], data[a] return data if __name__ == "__main__": a_ : Any = [0, 1, 2, 3, 4, 5, 6, 7] a_ : int = ['''python''', '''says''', '''hello''', '''!'''] print('''Fisher-Yates Shuffle:''') print('''List''', integers, strings) print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
263
1
from __future__ import annotations


def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k'th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1.

    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: peel off one factorial digit per position.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
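# Cross-check sketch against the standard library (assuming the
# `kth_permutation` above): enumerating all permutations with itertools must
# agree with the factorial-number-system construction for every k.
from itertools import permutations

n = 4
for k, expected in enumerate(sorted(permutations(range(n)))):
    assert kth_permutation(k, n) == list(expected)
print(f"kth_permutation agrees with itertools.permutations for n = {n}")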
31
"""simple docstring""" import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class UpperCamelCase : @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return self.get_dummy_input() @property def _UpperCAmelCase ( self ) -> int: '''simple docstring''' if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def _UpperCAmelCase ( self ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=False ,) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = 4 lowercase_ : Any = 32 lowercase_ : Optional[int] = (32, 32) lowercase_ : List[str] = torch.manual_seed(0 ) lowercase_ : List[Any] = torch.device(__UpperCamelCase ) lowercase_ : List[str] = (batch_size, num_channels) + sizes lowercase_ : Any = randn_tensor(__UpperCamelCase ,generator=__UpperCamelCase ,device=__UpperCamelCase ) lowercase_ : List[Any] = {'hidden_states': hidden_states} if include_temb: lowercase_ : Tuple = 128 lowercase_ : List[str] = randn_tensor((batch_size, temb_channels) ,generator=__UpperCamelCase ,device=__UpperCamelCase ) if include_res_hidden_states_tuple: lowercase_ : Tuple = torch.manual_seed(1 ) lowercase_ : Optional[Any] = (randn_tensor(__UpperCamelCase ,generator=__UpperCamelCase ,device=__UpperCamelCase ),) if include_encoder_hidden_states: lowercase_ : Any = floats_tensor((batch_size, 32, 32) ).to(__UpperCamelCase ) if include_skip_sample: lowercase_ : Dict = randn_tensor(((batch_size, 3) + sizes) ,generator=__UpperCamelCase ,device=__UpperCamelCase ) return dummy_input def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Tuple = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": lowercase_ : List[Any] = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) lowercase_ : Any = self.dummy_input return init_dict, inputs_dict def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ , lowercase_ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common() lowercase_ : str = self.block_class(**__UpperCamelCase ) unet_block.to(__UpperCamelCase ) unet_block.eval() with torch.no_grad(): lowercase_ : Dict = unet_block(**__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : int = output[0] self.assertEqual(output.shape ,self.output_shape ) lowercase_ : str = output[0, -1, -3:, -3:] lowercase_ : int = torch.tensor(__UpperCamelCase ).to(__UpperCamelCase ) assert torch_all_close(output_slice.flatten() ,__UpperCamelCase ,atol=5e-3 ) @unittest.skipIf(torch_device == 'mps' ,'Training is not supported in mps' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ , lowercase_ : List[Any] = self.prepare_init_args_and_inputs_for_common() lowercase_ : Optional[int] = self.block_class(**__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() lowercase_ : Any = model(**__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = output[0] lowercase_ : Union[str, Any] = torch.device(__UpperCamelCase ) lowercase_ : Any = randn_tensor(output.shape ,device=__UpperCamelCase ) lowercase_ : List[str] = 
torch.nn.functional.mse_loss(__UpperCamelCase ,__UpperCamelCase ) loss.backward()
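# The training check above reduces to a standard pattern: run the block, build
# a random target of the same shape, take an MSE loss, and backpropagate. A
# minimal standalone sketch of that pattern (the module and shapes here are
# illustrative stand-ins, not the blocks under test):
import torch
from torch import nn

block = nn.Conv2d(32, 32, kernel_size=3, padding=1)
block.train()

hidden_states = torch.randn(4, 32, 16, 16)
output = block(hidden_states)

noise = torch.randn_like(output)                    # random target, as in the test
loss = torch.nn.functional.mse_loss(output, noise)
loss.backward()                                     # populates the block's gradients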
425
0
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string via the running sums a and b."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
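# Adler-32 also ships in the standard library, which gives a direct
# cross-check for the implementation above (zlib works on bytes, so the text
# is encoded first; minimal sketch assuming the `adler32` defined above).
import zlib

text = "Wikipedia"
assert adler32(text) == zlib.adler32(text.encode("ascii"))
print(hex(adler32(text)))  # 0x11e60398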
701
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
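# Sanity check for `line_length` above: for the straight line f(x) = x from
# 0 to 1 the arc length is exactly sqrt(2), and the piecewise-linear
# approximation recovers it regardless of the step count.
import math

approx = line_length(lambda x: x, 0.0, 1.0, 10)
assert math.isclose(approx, math.sqrt(2))
print(approx)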
341
0
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase_ : """simple docstring""" def __init__( self : str ,lowercase__ : str = "cpu" ,lowercase__ : str = "openai/clip-vit-large-patch14" ): __lowercase = device __lowercase = CLIPTokenizerFast.from_pretrained(lowercase__ ) __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] __lowercase = torchvision.transforms.Normalize(self.image_mean ,self.image_std ) __lowercase = torchvision.transforms.Resize(2_2_4 ) __lowercase = torchvision.transforms.CenterCrop(2_2_4 ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Dict ): __lowercase = self.resize(lowercase__ ) __lowercase = self.center_crop(lowercase__ ) __lowercase = self.normalize(lowercase__ ) return images def __call__( self : List[Any] ,lowercase__ : str=None ,lowercase__ : Any=None ,**lowercase__ : List[Any] ): __lowercase = self.tokenizer(text=lowercase__ ,**lowercase__ ) __lowercase = self.preprocess_img(lowercase__ ) __lowercase = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase_ (nn.Module ): """simple docstring""" def __init__( self : Any ,lowercase__ : Optional[int]=1_0 ,lowercase__ : Optional[Any]=0.0_1 ,lowercase__ : Optional[int]=None ,lowercase__ : Optional[Any]=None ,lowercase__ : str=None ,lowercase__ : Any=None ,lowercase__ : Optional[Any]=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : Union[str, Any]=False ,lowercase__ : Optional[int]=True ,lowercase__ : Optional[Any]="image" ,lowercase__ : Tuple=True ,lowercase__ : Any=False ,lowercase__ : Optional[int]=False ,lowercase__ : Optional[int]=False ,): super().__init__() __lowercase = None __lowercase = device if device else get_device() if vqgan: __lowercase = vqgan else: __lowercase = load_vqgan(self.device ,conf_path=lowercase__ ,ckpt_path=lowercase__ ) self.vqgan.eval() if clip: __lowercase = clip else: __lowercase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' ) self.clip.to(self.device ) __lowercase = ProcessorGradientFlow(device=self.device ) __lowercase = iterations __lowercase = lr __lowercase = log __lowercase = make_grid __lowercase = return_val __lowercase = quantize __lowercase = self.vqgan.decoder.z_shape def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[Any]=None ,lowercase__ : Tuple=None ,lowercase__ : Dict=5 ,lowercase__ : Any=True ): __lowercase = [] if output_path is None: __lowercase = '''./animation.gif''' if input_path is None: __lowercase = self.save_path __lowercase = sorted(glob(input_path + '''/*''' ) ) if not len(lowercase__ ): raise ValueError( '''No images found in save path, aborting (did you pass save_intermediate=True to the generate''' ''' function?)''' ) if len(lowercase__ ) == 1: print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' ) __lowercase = total_duration / len(lowercase__ ) __lowercase = [frame_duration] * len(lowercase__ ) if extend_frames: __lowercase = 1.5 __lowercase = 3 for file_name in paths: if file_name.endswith('''.png''' ): images.append(imageio.imread(lowercase__ ) ) imageio.mimsave(lowercase__ 
,lowercase__ ,duration=lowercase__ ) print(F"gif saved to {output_path}" ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[Any]=None ,lowercase__ : str=None ): if not (path or img): raise ValueError('''Input either path or tensor''' ) if img is not None: raise NotImplementedError __lowercase = preprocess(Image.open(lowercase__ ) ,target_image_size=2_5_6 ).to(self.device ) __lowercase = preprocess_vqgan(lowercase__ ) __lowercase , *__lowercase = self.vqgan.encode(lowercase__ ) return z def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Dict ): __lowercase = self.latent.detach().requires_grad_() __lowercase = base_latent + transform_vector if self.quantize: __lowercase , *__lowercase = self.vqgan.quantize(lowercase__ ) else: __lowercase = trans_latent return self.vqgan.decode(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : Optional[int]=None ): __lowercase = self.clip_preprocessor(text=lowercase__ ,images=lowercase__ ,return_tensors='''pt''' ,padding=lowercase__ ) __lowercase = self.clip(**lowercase__ ) __lowercase = clip_outputs.logits_per_image if weights is not None: __lowercase = similarity_logits * weights return similarity_logits.sum() def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ): __lowercase = self._get_clip_similarity(pos_prompts['''prompts'''] ,lowercase__ ,weights=(1 / pos_prompts['''weights''']) ) if neg_prompts: __lowercase = self._get_clip_similarity(neg_prompts['''prompts'''] ,lowercase__ ,weights=neg_prompts['''weights'''] ) else: __lowercase = torch.tensor([1] ,device=self.device ) __lowercase = -torch.log(lowercase__ ) + torch.log(lowercase__ ) return loss def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ,lowercase__ : str ): __lowercase = torch.randn_like(self.latent ,requires_grad=lowercase__ ,device=self.device ) __lowercase = torch.optim.Adam([vector] ,lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() __lowercase = self._add_vector(lowercase__ ) __lowercase = loop_post_process(lowercase__ ) __lowercase = self._get_CLIP_loss(lowercase__ ,lowercase__ ,lowercase__ ) print('''CLIP loss''' ,lowercase__ ) if self.log: wandb.log({'''CLIP Loss''': clip_loss} ) clip_loss.backward(retain_graph=lowercase__ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Union[str, Any] ): wandb.init(reinit=lowercase__ ,project='''face-editor''' ) wandb.config.update({'''Positive Prompts''': positive_prompts} ) wandb.config.update({'''Negative Prompts''': negative_prompts} ) wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} ) if image_path: __lowercase = Image.open(lowercase__ ) __lowercase = image.resize((2_5_6, 2_5_6) ) wandb.log('''Original Image''' ,wandb.Image(lowercase__ ) ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Any ): if not prompts: return [] __lowercase = [] __lowercase = [] if isinstance(lowercase__ ,lowercase__ ): __lowercase = [prompt.strip() for prompt in prompts.split('''|''' )] for prompt in prompts: if isinstance(lowercase__ ,(tuple, list) ): __lowercase = prompt[0] __lowercase = float(prompt[1] ) elif ":" in prompt: __lowercase , __lowercase = prompt.split(''':''' ) __lowercase = float(lowercase__ ) else: __lowercase = prompt __lowercase = 1.0 
processed_prompts.append(lowercase__ ) weights.append(lowercase__ ) return { "prompts": processed_prompts, "weights": torch.tensor(lowercase__ ,device=self.device ), } def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : List[Any]=None ,lowercase__ : Dict=None ,lowercase__ : Optional[int]=True ,lowercase__ : str=False ,lowercase__ : Any=True ,lowercase__ : List[str]=True ,lowercase__ : int=None ,): if image_path: __lowercase = self._get_latent(lowercase__ ) else: __lowercase = torch.randn(self.latent_dim ,device=self.device ) if self.log: self._init_logging(lowercase__ ,lowercase__ ,lowercase__ ) assert pos_prompts, "You must provide at least one positive prompt." __lowercase = self.process_prompts(lowercase__ ) __lowercase = self.process_prompts(lowercase__ ) if save_final and save_path is None: __lowercase = os.path.join('''./outputs/''' ,'''_'''.join(pos_prompts['''prompts'''] ) ) if not os.path.exists(lowercase__ ): os.makedirs(lowercase__ ) else: __lowercase = save_path + '''_''' + get_timestamp() os.makedirs(lowercase__ ) __lowercase = save_path __lowercase = self.vqgan.decode(self.latent )[0] if show_intermediate: print('''Original Image''' ) show_pil(custom_to_pil(lowercase__ ) ) __lowercase = loop_post_process(lowercase__ ) for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase__ ,lowercase__ ,lowercase__ ) ): if show_intermediate: show_pil(lowercase__ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}.png" ) ) if self.log: wandb.log({'''Image''': wandb.Image(lowercase__ )} ) if show_final: show_pil(lowercase__ ) if save_final: transformed_img.save(os.path.join(self.save_path ,F"iter_{iter:03d}_final.png" ) )
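# The optimisation objective in `_get_CLIP_loss` above reduces to a log-ratio
# of CLIP similarities: drive similarity to the positive prompts up and to the
# negative prompts down. Minimal standalone sketch (the similarity values are
# placeholders, not real CLIP outputs):
import torch

pos_similarity = torch.tensor(25.0)  # summed CLIP logits for positive prompts
neg_similarity = torch.tensor(5.0)   # summed CLIP logits for negative prompts

loss = -torch.log(pos_similarity) + torch.log(neg_similarity)
print(loss)  # log(neg / pos); negative here, and lower is better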
41
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() _SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Any = "The Nymphenburg Palace is a beautiful palace in Munich!" def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" snake_case = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 10_24, '''hidden_size''': 7_68, '''max_length''': 5_12, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 10_24, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1e-5, '''token_type_vocab_size''': 2, } snake_case = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py snake_case = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] ,num_layers=predefined_args['''num_layers'''] ,units=predefined_args['''units'''] ,hidden_size=predefined_args['''hidden_size'''] ,max_length=predefined_args['''max_length'''] ,num_heads=predefined_args['''num_heads'''] ,scaled=predefined_args['''scaled'''] ,dropout=predefined_args['''dropout'''] ,output_attention=UpperCamelCase_ ,output_all_encodings=UpperCamelCase_ ,use_residual=predefined_args['''use_residual'''] ,activation=predefined_args.get('''activation''' ,'''gelu''' ) ,layer_norm_eps=predefined_args.get('''layer_norm_eps''' ,UpperCamelCase_ ) ,) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later snake_case = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab snake_case = os.path.join(get_home_dir() ,'''models''' ) snake_case = _load_vocab(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,cls=UpperCamelCase_ ) snake_case = nlp.model.BERTModel( UpperCamelCase_ ,len(UpperCamelCase_ ) ,units=predefined_args['''units'''] ,embed_size=predefined_args['''embed_size'''] ,embed_dropout=predefined_args['''embed_dropout'''] ,word_embed=predefined_args['''word_embed'''] ,use_pooler=UpperCamelCase_ ,use_token_type_embed=UpperCamelCase_ ,token_type_vocab_size=predefined_args['''token_type_vocab_size'''] ,use_classifier=UpperCamelCase_ ,use_decoder=UpperCamelCase_ ,) original_bort.load_parameters(UpperCamelCase_ ,cast_dtype=UpperCamelCase_ ,ignore_extra=UpperCamelCase_ ) snake_case = original_bort._collect_params_with_prefix() # Build our config 🤗 snake_case = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, 
'''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(UpperCamelCase_ ), } snake_case = BertConfig.from_dict(UpperCamelCase_ ) snake_case = BertForMaskedLM(UpperCamelCase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(UpperCamelCase_ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(UpperCamelCase_ ,UpperCamelCase_ ): snake_case = hf_param.shape snake_case = to_torch(params[gluon_param] ) snake_case = gluon_param.shape assert ( shape_hf == shape_gluon ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param snake_case = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight ,'''word_embed.0.weight''' ) snake_case = check_and_map_params( 
hf_bort_model.bert.embeddings.position_embeddings.weight ,'''encoder.position_weight''' ) snake_case = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias ,'''encoder.layer_norm.beta''' ) snake_case = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight ,'''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) snake_case = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): snake_case = hf_bort_model.bert.encoder.layer[i] # self attention snake_case = layer.attention.self snake_case = check_and_map_params( self_attn.key.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) snake_case = check_and_map_params( self_attn.key.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) snake_case = check_and_map_params( self_attn.query.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) snake_case = check_and_map_params( self_attn.query.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) snake_case = check_and_map_params( self_attn.value.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) snake_case = check_and_map_params( self_attn.value.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output snake_case = layer.attention.output snake_case = check_and_map_params( self_output.dense.bias ,F'''encoder.transformer_cells.{i}.proj.bias''' ) snake_case = check_and_map_params( self_output.dense.weight ,F'''encoder.transformer_cells.{i}.proj.weight''' ) snake_case = check_and_map_params( self_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.layer_norm.beta''' ) snake_case = check_and_map_params( self_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate snake_case = layer.intermediate snake_case = check_and_map_params( intermediate.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) snake_case = check_and_map_params( intermediate.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output snake_case = layer.output snake_case = check_and_map_params( bert_output.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) snake_case = check_and_map_params( bert_output.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) snake_case = check_and_map_params( bert_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) snake_case = check_and_map_params( bert_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models snake_case = RobertaTokenizer.from_pretrained('''roberta-base''' ) snake_case = tokenizer.encode_plus(UpperCamelCase_ )['''input_ids'''] # Get gluon output snake_case = mx.nd.array([input_ids] ) snake_case = original_bort(inputs=UpperCamelCase_ ,token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(UpperCamelCase_ ) snake_case = BertModel.from_pretrained(UpperCamelCase_ ) hf_bort_model.eval() snake_case = tokenizer.encode_plus(UpperCamelCase_ ,return_tensors='''pt''' ) snake_case = hf_bort_model(**UpperCamelCase_ )[0] snake_case = output_gluon[0].asnumpy() snake_case = output_hf[0].detach().numpy() snake_case = np.max(np.abs(hf_layer - gluon_layer ) ).item() 
snake_case = np.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1e-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' ,UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
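# The conversion above hinges on one small helper pattern: copy a source array
# into a torch Parameter only after verifying its shape against the target.
# Minimal sketch of that shape-checked copy (names are illustrative):
import numpy as np
import torch
from torch import nn


def shape_checked_copy(hf_param: nn.Parameter, source_array: np.ndarray) -> nn.Parameter:
    tensor = torch.from_numpy(source_array).float()
    assert tensor.shape == hf_param.shape, f"{tensor.shape} != {hf_param.shape}"
    return nn.Parameter(tensor)


param = shape_checked_copy(nn.Parameter(torch.zeros(2, 3)), np.ones((2, 3), dtype=np.float32))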
550
0
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
86
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
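# Usage sketch for the Trie above: `find_word` returns every completion of a
# prefix, each terminated by the space contributed by the end-of-word sentinel.
t = Trie()
for w in ("depart", "detergent", "deal"):
    t.insert_word(w)

print(t.find_word("de"))   # ('part ', 'tergent ', 'al ')
print(t.find_word("xyz"))  # [] -- no stored word starts with this prefix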
86
1
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# UpperCAmelCase__ : Any = [ # (stable-diffusion, HF Diffusers) ("time_embed.0.weight", "time_embedding.linear_1.weight"), ("time_embed.0.bias", "time_embedding.linear_1.bias"), ("time_embed.2.weight", "time_embedding.linear_2.weight"), ("time_embed.2.bias", "time_embedding.linear_2.bias"), ("input_blocks.0.0.weight", "conv_in.weight"), ("input_blocks.0.0.bias", "conv_in.bias"), ("out.0.weight", "conv_norm_out.weight"), ("out.0.bias", "conv_norm_out.bias"), ("out.2.weight", "conv_out.weight"), ("out.2.bias", "conv_out.bias"), ] UpperCAmelCase__ : str = [ # (stable-diffusion, HF Diffusers) ("in_layers.0", "norm1"), ("in_layers.2", "conv1"), ("out_layers.0", "norm2"), ("out_layers.3", "conv2"), ("emb_layers.1", "time_emb_proj"), ("skip_connection", "conv_shortcut"), ] UpperCAmelCase__ : List[str] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks UpperCAmelCase__ : Any = F"""down_blocks.{i}.resnets.{j}.""" UpperCAmelCase__ : Dict = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 UpperCAmelCase__ : Dict = F"""down_blocks.{i}.attentions.{j}.""" UpperCAmelCase__ : Optional[int] = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks UpperCAmelCase__ : int = F"""up_blocks.{i}.resnets.{j}.""" UpperCAmelCase__ : Optional[Any] = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 UpperCAmelCase__ : List[Any] = F"""up_blocks.{i}.attentions.{j}.""" UpperCAmelCase__ : Optional[Any] = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 UpperCAmelCase__ : int = F"""down_blocks.{i}.downsamplers.0.conv.""" UpperCAmelCase__ : str = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 UpperCAmelCase__ : str = F"""up_blocks.{i}.upsamplers.0.""" UpperCAmelCase__ : Union[str, Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) UpperCAmelCase__ : Tuple = "mid_block.attentions.0." UpperCAmelCase__ : List[Any] = "middle_block.1." unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): UpperCAmelCase__ : str = F"""mid_block.resnets.{j}.""" UpperCAmelCase__ : Optional[Any] = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def A ( snake_case__ : Any ) -> Any: '''simple docstring''' # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. 
__snake_case = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: __snake_case = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: __snake_case = v.replace(snake_case__ , snake_case__ ) __snake_case = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: __snake_case = v.replace(snake_case__ , snake_case__ ) __snake_case = v __snake_case = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# UpperCAmelCase__ : Any = [ # (stable-diffusion, HF Diffusers) ("nin_shortcut", "conv_shortcut"), ("norm_out", "conv_norm_out"), ("mid.attn_1.", "mid_block.attentions.0."), ] for i in range(4): # down_blocks have two resnets for j in range(2): UpperCAmelCase__ : Any = F"""encoder.down_blocks.{i}.resnets.{j}.""" UpperCAmelCase__ : Dict = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: UpperCAmelCase__ : Tuple = F"""down_blocks.{i}.downsamplers.0.""" UpperCAmelCase__ : Optional[int] = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) UpperCAmelCase__ : Optional[Any] = F"""up_blocks.{i}.upsamplers.0.""" UpperCAmelCase__ : Tuple = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): UpperCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}.""" UpperCAmelCase__ : int = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): UpperCAmelCase__ : Optional[Any] = F"""mid_block.resnets.{i}.""" UpperCAmelCase__ : Tuple = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) UpperCAmelCase__ : Dict = [ # (stable-diffusion, HF Diffusers) ("norm.", "group_norm."), ("q.", "query."), ("k.", "key."), ("v.", "value."), ("proj_out.", "proj_attn."), ] def A ( snake_case__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape , 1 , 1 ) def A ( snake_case__ : Dict ) -> str: '''simple docstring''' __snake_case = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: __snake_case = v.replace(snake_case__ , snake_case__ ) __snake_case = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: __snake_case = v.replace(snake_case__ , snake_case__ ) __snake_case = v __snake_case = {v: vae_state_dict[k] for k, v in mapping.items()} __snake_case = ['q', 'k', 'v', 'proj_out'] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"mid.attn_1.{weight_name}.weight" in k: print(f"Reshaping {k} for SD format" ) __snake_case = reshape_weight_for_sd(snake_case__ ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# UpperCAmelCase__ : Union[str, Any] = [ # (stable-diffusion, HF Diffusers) ("resblocks.", "text_model.encoder.layers."), ("ln_1", "layer_norm1"), ("ln_2", "layer_norm2"), (".c_fc.", ".fc1."), (".c_proj.", ".fc2."), (".attn", ".self_attn"), ("ln_final.", "transformer.text_model.final_layer_norm."), ("token_embedding.weight", 
"transformer.text_model.embeddings.token_embedding.weight"), ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), ] UpperCAmelCase__ : int = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} UpperCAmelCase__ : Any = re.compile("|".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp UpperCAmelCase__ : Optional[int] = {"q": 0, "k": 1, "v": 2} def A ( snake_case__ : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case = {} __snake_case = {} __snake_case = {} for k, v in text_enc_dict.items(): if ( k.endswith('.self_attn.q_proj.weight' ) or k.endswith('.self_attn.k_proj.weight' ) or k.endswith('.self_attn.v_proj.weight' ) ): __snake_case = k[: -len('.q_proj.weight' )] __snake_case = k[-len('q_proj.weight' )] if k_pre not in capture_qkv_weight: __snake_case = [None, None, None] __snake_case = v continue if ( k.endswith('.self_attn.q_proj.bias' ) or k.endswith('.self_attn.k_proj.bias' ) or k.endswith('.self_attn.v_proj.bias' ) ): __snake_case = k[: -len('.q_proj.bias' )] __snake_case = k[-len('q_proj.bias' )] if k_pre not in capture_qkv_bias: __snake_case = [None, None, None] __snake_case = v continue __snake_case = textenc_pattern.sub(lambda snake_case__ : protected[re.escape(m.group(0 ) )] , snake_case__ ) __snake_case = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' ) __snake_case = textenc_pattern.sub(lambda snake_case__ : protected[re.escape(m.group(0 ) )] , snake_case__ ) __snake_case = torch.cat(snake_case__ ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' ) __snake_case = textenc_pattern.sub(lambda snake_case__ : protected[re.escape(m.group(0 ) )] , snake_case__ ) __snake_case = torch.cat(snake_case__ ) return new_state_dict def A ( snake_case__ : Optional[Any] ) -> Dict: '''simple docstring''' return text_enc_dict if __name__ == "__main__": UpperCAmelCase__ : Tuple = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt." ) UpperCAmelCase__ : Tuple = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors UpperCAmelCase__ : Any = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors") UpperCAmelCase__ : Union[str, Any] = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors") UpperCAmelCase__ : Optional[int] = osp.join(args.model_path, "text_encoder", "model.safetensors") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): UpperCAmelCase__ : int = load_file(unet_path, device="cpu") else: UpperCAmelCase__ : Any = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin") UpperCAmelCase__ : Tuple = torch.load(unet_path, map_location="cpu") if osp.exists(vae_path): UpperCAmelCase__ : Union[str, Any] = load_file(vae_path, device="cpu") else: UpperCAmelCase__ : List[str] = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin") UpperCAmelCase__ : List[str] = torch.load(vae_path, map_location="cpu") if osp.exists(text_enc_path): UpperCAmelCase__ : Dict = load_file(text_enc_path, device="cpu") else: UpperCAmelCase__ : Dict = osp.join(args.model_path, "text_encoder", "pytorch_model.bin") UpperCAmelCase__ : Optional[Any] = torch.load(text_enc_path, map_location="cpu") # Convert the UNet model UpperCAmelCase__ : Dict = convert_unet_state_dict(unet_state_dict) UpperCAmelCase__ : int = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} # Convert the VAE model UpperCAmelCase__ : Union[str, Any] = convert_vae_state_dict(vae_state_dict) UpperCAmelCase__ : Optional[int] = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper UpperCAmelCase__ : int = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm UpperCAmelCase__ : List[str] = {"transformer." + k: v for k, v in text_enc_dict.items()} UpperCAmelCase__ : Dict = convert_text_enc_state_dict_vaa(text_enc_dict) UpperCAmelCase__ : str = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} else: UpperCAmelCase__ : Union[str, Any] = convert_text_enc_state_dict(text_enc_dict) UpperCAmelCase__ : int = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint UpperCAmelCase__ : str = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: UpperCAmelCase__ : Any = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: UpperCAmelCase__ : str = {"state_dict": state_dict} torch.save(state_dict, args.checkpoint_path)
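# One conversion detail worth isolating: the HF VAE attention uses linear
# projections while the original checkpoint stores 1x1 convolutions, so
# `reshape_weight_for_sd` above just appends two singleton spatial dims. Sketch:
import torch

linear_weight = torch.randn(512, 512)  # HF linear projection weight
conv_weight = linear_weight.reshape(*linear_weight.shape, 1, 1)
assert conv_weight.shape == (512, 512, 1, 1)  # SD-style 1x1 conv kernel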
313
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class __lowercase ( unittest.TestCase ): @parameterized.expand([(None,), ('foo.json',)]) def _a ( self , lowercase_) -> int: __snake_case = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_ , config_name=lowercase_) __snake_case = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.temperature , 0.7) self.assertEqual(loaded_config.length_penalty , 1.0) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 5_0) self.assertEqual(loaded_config.max_length , 2_0) self.assertEqual(loaded_config.max_time , lowercase_) def _a ( self) -> Optional[int]: __snake_case = AutoConfig.from_pretrained('gpt2') __snake_case = GenerationConfig.from_model_config(lowercase_) __snake_case = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(lowercase_ , lowercase_) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id) def _a ( self) -> str: __snake_case = GenerationConfig() __snake_case = { 'max_new_tokens': 1_0_2_4, 'foo': 'bar', } __snake_case = copy.deepcopy(lowercase_) __snake_case = generation_config.update(**lowercase_) # update_kwargs was not modified (no side effects) self.assertEqual(lowercase_ , lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_0_2_4) # `.update()` returns a dictionary of unused kwargs self.assertEqual(lowercase_ , {'foo': 'bar'}) def _a ( self) -> Optional[Any]: __snake_case = GenerationConfig() __snake_case = 'bar' with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir: generation_config.save_pretrained(lowercase_) __snake_case = GenerationConfig.from_pretrained(lowercase_) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , 'bar') __snake_case = GenerationConfig.from_model_config(lowercase_) assert not hasattr(lowercase_ , 'foo') # no new kwargs should be initialized if from config def _a ( self) -> Optional[Any]: __snake_case = GenerationConfig() self.assertEqual(default_config.temperature , 1.0) self.assertEqual(default_config.do_sample , lowercase_) self.assertEqual(default_config.num_beams , 1) __snake_case = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7) self.assertEqual(config.do_sample , lowercase_) self.assertEqual(config.num_beams , 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase_) __snake_case = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0) self.assertEqual(loaded_config.temperature , 1.0) 
self.assertEqual(loaded_config.do_sample , lowercase_) self.assertEqual(loaded_config.num_beams , 1) # default value @is_staging_test class __lowercase ( unittest.TestCase ): @classmethod def _a ( cls) -> List[str]: __snake_case = TOKEN HfFolder.save_token(lowercase_) @classmethod def _a ( cls) -> Dict: try: delete_repo(token=cls._token , repo_id='test-generation-config') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org') except HTTPError: pass def _a ( self) -> List[Any]: __snake_case = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('test-generation-config' , use_auth_token=self._token) __snake_case = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='test-generation-config') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='test-generation-config' , push_to_hub=lowercase_ , use_auth_token=self._token) __snake_case = GenerationConfig.from_pretrained(F"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) def _a ( self) -> str: __snake_case = GenerationConfig( do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token) __snake_case = GenerationConfig.from_pretrained('valid_org/test-generation-config-org') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_)) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org') # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase_ , repo_id='valid_org/test-generation-config-org' , push_to_hub=lowercase_ , use_auth_token=self._token) __snake_case = GenerationConfig.from_pretrained('valid_org/test-generation-config-org') for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
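# Outside the test harness, the round-trip these tests exercise looks like
# this (minimal sketch using the public GenerationConfig API):
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)

assert loaded.temperature == 0.7
assert loaded.top_k == 50  # unspecified fields keep their defaults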
313
1
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
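# Python's builtin `oct` gives a direct cross-check for the function above;
# both produce the `0o` prefix (minimal sketch assuming `decimal_to_octal`).
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n)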
711
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
96
0
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking (bell) biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
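# Minimal usage sketch. This assumes the `IIRFilter` class from
# `audio_filters.iir_filter` exposes a per-sample `process` method; if the
# API differs, adapt the loop accordingly.
from math import sin, tau

filt = make_lowpass(1_000, 48_000)  # 1 kHz cutoff at 48 kHz sample rate

samplerate, tone = 48_000, 10_000   # feed a 10 kHz tone through the filter
out = [filt.process(sin(tau * tone * n / samplerate)) for n in range(480)]
print(max(abs(s) for s in out[240:]))  # attenuated well below 1.0 once settled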
53
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process _snake_case : Optional[int] = logging.getLogger(__name__) _snake_case : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) _snake_case : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" a_ = field( default=_UpperCamelCase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCamelCase )} , ) a_ = field( default=_UpperCamelCase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a_ = field( default=_UpperCamelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def lowercase ( self : List[Any] ) -> List[Any]: if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' ) @dataclass class _UpperCAmelCase : """simple docstring""" a_ = field( default=_UpperCamelCase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) a_ = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) a_ = field( default=5 
, metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) a_ = field( default=_UpperCamelCase , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) a_ = field( default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) a_ = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) a_ = field( default=_UpperCamelCase , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def lowercase ( self : int ) -> int: if self.train_file is not None: __lowerCAmelCase = self.train_file.split('.' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: __lowerCAmelCase = self.validation_file.split('.' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Union[str, Any] ): with open(lowerCAmelCase_, 'r', encoding='utf-8' ) as f: __lowerCAmelCase = [json.loads(lowerCAmelCase_ ) for line in f.read().splitlines() if (len(lowerCAmelCase_ ) > 0 and not line.isspace())] assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) __lowerCAmelCase = {c: dataset[c] for c in dataset.column_names} __lowerCAmelCase = refs return Dataset.from_dict(lowerCAmelCase_ ) def a_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __lowerCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s', lowerCAmelCase_ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __lowerCAmelCase = load_dataset(data_args.dataset_name, data_args.dataset_config_name ) if "validation" not in datasets.keys(): __lowerCAmelCase = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=F"""train[:{data_args.validation_split_percentage}%]""", ) __lowerCAmelCase = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=F"""train[{data_args.validation_split_percentage}%:]""", ) else: __lowerCAmelCase = {} if data_args.train_file is not None: __lowerCAmelCase = data_args.train_file if data_args.validation_file is not None: __lowerCAmelCase = data_args.validation_file __lowerCAmelCase = data_args.train_file.split('.' )[-1] if extension == "txt": __lowerCAmelCase = 'text' __lowerCAmelCase = load_dataset(lowerCAmelCase_, data_files=lowerCAmelCase_ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCAmelCase = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: __lowerCAmelCase = AutoConfig.from_pretrained(model_args.config_name, **lowerCAmelCase_ ) elif model_args.model_name_or_path: __lowerCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path, **lowerCAmelCase_ ) else: __lowerCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) __lowerCAmelCase = { 'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **lowerCAmelCase_ ) elif model_args.model_name_or_path: __lowerCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **lowerCAmelCase_ ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) if model_args.model_name_or_path: __lowerCAmelCase = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=lowerCAmelCase_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info('Training new model from scratch' ) __lowerCAmelCase = AutoModelForMaskedLM.from_config(lowerCAmelCase_ ) model.resize_token_embeddings(len(lowerCAmelCase_ ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: __lowerCAmelCase = datasets['train'].column_names else: __lowerCAmelCase = datasets['validation'].column_names __lowerCAmelCase = 'text' if 'text' in column_names else column_names[0] __lowerCAmelCase = 'max_length' if data_args.pad_to_max_length else False def tokenize_function(lowerCAmelCase_ : str ): # Remove empty lines __lowerCAmelCase = [line for line in examples['text'] if len(lowerCAmelCase_ ) > 0 and not line.isspace()] return tokenizer(examples['text'], padding=lowerCAmelCase_, truncation=lowerCAmelCase_, max_length=data_args.max_seq_length ) __lowerCAmelCase = datasets.map( lowerCAmelCase_, batched=lowerCAmelCase_, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Add the chinese references if provided if data_args.train_ref_file is not None: __lowerCAmelCase = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file ) if data_args.validation_ref_file is not None: __lowerCAmelCase = add_chinese_references( tokenized_datasets['validation'], data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer __lowerCAmelCase = data_args.train_ref_file or data_args.validation_ref_file if has_ref: __lowerCAmelCase = False # Data collator # This one will take care of randomly masking the tokens. 
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")

        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
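# A minimal sketch of the whole-word-masking fine-tuning loop the script above
# wires together. The checkpoint name, the local `lines.txt` file, and the
# hyperparameters are illustrative assumptions, not values from the script.
from datasets import load_dataset
from transformers import (
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")

raw = load_dataset("text", data_files={"train": "lines.txt"})
tokenized = raw.map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=128),
    batched=True,
    remove_columns=["text"],
)

# Unlike the plain MLM collator, this one masks every sub-word piece of a
# chosen word together, which is the point of the script above.
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="mlm_wwm_out", per_device_train_batch_size=8),
    train_dataset=tokenized["train"],
    data_collator=collator,
)
trainer.train()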
53
1
import collections import os import re from pathlib import Path _UpperCamelCase = "src/transformers" # Matches is_xxx_available() _UpperCamelCase = re.compile(r"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} _UpperCamelCase = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _UpperCamelCase = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available _UpperCamelCase = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") _UpperCamelCase = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _UpperCamelCase = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", _UpperCamelCase = re.compile(r"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], _UpperCamelCase = re.compile(r"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo _UpperCamelCase = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: _UpperCamelCase = re.compile(r"^\s*try:") # Catches a line with else: _UpperCamelCase = re.compile(r"^\s*else:") def _lowercase ( lowercase__ ): if _re_test_backend.search(lowercase__ ) is None: return None __lowerCAmelCase : Optional[int] = [b[0] for b in _re_backend.findall(lowercase__ )] backends.sort() return "_and_".join(lowercase__ ) def _lowercase ( lowercase__ ): with open(lowercase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __lowerCAmelCase : Optional[Any] = f.readlines() __lowerCAmelCase : Optional[int] = 0 while line_index < len(lowercase__ ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowercase__ ): return None # First grab the objects without a specific backend in _import_structure __lowerCAmelCase : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: __lowerCAmelCase : Optional[Any] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowercase__ ): __lowerCAmelCase : Any = _re_one_line_import_struct.search(lowercase__ ).groups()[0] __lowerCAmelCase : int = re.findall(r'''\[([^\]]+)\]''' , lowercase__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue __lowerCAmelCase : Any = _re_import_struct_key_value.search(lowercase__ ) if single_line_import_search is not None: __lowerCAmelCase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(lowercase__ ) > 0] objects.extend(lowercase__ ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 __lowerCAmelCase : Tuple = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowerCAmelCase : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowerCAmelCase : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowerCAmelCase : Tuple = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): __lowerCAmelCase : Dict = lines[line_index] if _re_import_struct_add_one.search(lowercase__ ) is not None: objects.append(_re_import_struct_add_one.search(lowercase__ ).groups()[0] ) elif _re_import_struct_add_many.search(lowercase__ ) is not None: __lowerCAmelCase : str = _re_import_struct_add_many.search(lowercase__ ).groups()[0].split(''', ''' ) __lowerCAmelCase : List[Any] = [obj[1:-1] for obj in imports if len(lowercase__ ) > 0] objects.extend(lowercase__ ) elif _re_between_brackets.search(lowercase__ ) is not None: __lowerCAmelCase : int = _re_between_brackets.search(lowercase__ ).groups()[0].split(''', ''' ) __lowerCAmelCase : Optional[Any] = [obj[1:-1] for obj in imports if len(lowercase__ ) > 0] objects.extend(lowercase__ ) elif _re_quote_object.search(lowercase__ ) is not None: objects.append(_re_quote_object.search(lowercase__ ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 __lowerCAmelCase : List[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowerCAmelCase : Union[str, Any] = [] while ( line_index < len(lowercase__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): __lowerCAmelCase : Any = lines[line_index] __lowerCAmelCase : int = _re_import.search(lowercase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowerCAmelCase : str = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(lowercase__ ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowerCAmelCase : int = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowerCAmelCase : Optional[int] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowerCAmelCase : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): __lowerCAmelCase : Tuple = lines[line_index] __lowerCAmelCase : int = _re_import.search(lowercase__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 __lowerCAmelCase : List[str] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _lowercase ( lowercase__ , lowercase__ ): def find_duplicates(lowercase__ ): return [k for k, v in collections.Counter(lowercase__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowerCAmelCase : Optional[Any] = [] for key in import_dict_objects.keys(): __lowerCAmelCase : List[str] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) __lowerCAmelCase : List[Any] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowerCAmelCase : int = '''base imports''' if key == '''none''' else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def _lowercase ( ): __lowerCAmelCase : Union[str, Any] = [] for root, _, files in os.walk(lowercase__ ): if "__init__.py" in files: __lowerCAmelCase : str = os.path.join(lowercase__ , '''__init__.py''' ) __lowerCAmelCase : str = parse_init(lowercase__ ) if objects is not None: __lowerCAmelCase : Union[str, Any] = analyze_results(*lowercase__ ) if len(lowercase__ ) > 0: __lowerCAmelCase : Optional[int] = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(lowercase__ ) ) if len(lowercase__ ) > 0: raise ValueError('''\n\n'''.join(lowercase__ ) ) def _lowercase ( ): __lowerCAmelCase : Union[str, Any] = [] for path, directories, files in os.walk(lowercase__ ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(lowercase__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowercase__ ) / folder).glob('''*.py''' ) ) ) == 0: continue __lowerCAmelCase : Tuple = str((Path(lowercase__ ) / folder).relative_to(lowercase__ ) ) __lowerCAmelCase : str = short_path.replace(os.path.sep , '''.''' ) submodules.append(lowercase__ ) for fname in files: if fname == "__init__.py": continue __lowerCAmelCase : List[str] = str((Path(lowercase__ ) / fname).relative_to(lowercase__ ) ) 
__lowerCAmelCase : List[str] = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(lowercase__ ) return submodules _UpperCamelCase = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def _lowercase ( ): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import __lowerCAmelCase : Union[str, Any] = direct_transformers_import(lowercase__ ) __lowerCAmelCase : Union[str, Any] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowercase__ , '''__init__.py''' ) , '''r''' ) as f: __lowerCAmelCase : Union[str, Any] = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , lowercase__ ) ) ) __lowerCAmelCase : str = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowercase__ ) > 0: __lowerCAmelCase : Any = '''\n'''.join(f"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' f"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
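# A minimal sketch of the lazy-init layout that check_all_inits() above parses:
# an `_import_structure` dict mirrored by TYPE_CHECKING imports, with optional
# backends gated by try/except/else blocks that the regexes above recognize.
# The `foo` module and class names are hypothetical.
from typing import TYPE_CHECKING

from transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_foo": ["FooConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel"]

if TYPE_CHECKING:
    from .configuration_foo import FooConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_foo import FooModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)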
583
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
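# A brute-force cross-check of solution() above (slow but transparent): an
# a x b grid contains T(a) * T(b) rectangles with T(n) = n(n+1)/2, so small
# grids can be scanned directly. The search limit of 100 is an assumption
# that comfortably covers the optimum for a 2,000,000 target.
def brute_force(target: int = 2_000_000, limit: int = 100) -> int:
    best_diff, best_area = target, 0
    for a in range(1, limit):
        for b in range(a, limit):
            rectangles = a * (a + 1) * b * (b + 1) // 4
            if abs(target - rectangles) < best_diff:
                best_diff, best_area = abs(target - rectangles), a * b
    return best_area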
583
1
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" A_ = 10 def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A_ = [1, 2, 3, 4] A_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0 ) , _snake_case ) def lowerCamelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0 ) , _snake_case ) def lowerCamelCase__ ( self : str ) -> Dict: """simple docstring""" A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] A_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0 ) , _snake_case ) def lowerCamelCase__ ( self : Dict ) -> Dict: """simple docstring""" A_ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." A_ , A_ = process_story(_snake_case ) self.assertEqual(_snake_case , [] ) def lowerCamelCase__ ( self : str ) -> str: """simple docstring""" A_ = "" A_ , A_ = process_story(_snake_case ) self.assertEqual(_snake_case , [] ) self.assertEqual(_snake_case , [] ) def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" A_ = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) A_ , A_ = process_story(_snake_case ) A_ = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(_snake_case , _snake_case ) A_ = ["It was the best of times."] self.assertEqual(_snake_case , _snake_case ) def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" A_ = torch.tensor([1, 2, 3, 4] ) A_ = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_snake_case , 0 ).numpy() , expected.numpy() ) def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" A_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) A_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_snake_case , 23 ).numpy() , expected.numpy() ) def lowerCamelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" A_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) A_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_snake_case , 1 ).numpy() , expected.numpy() ) def lowerCamelCase__ ( self : int ) -> List[str]: """simple docstring""" A_ = 101 A_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) A_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) A_ = compute_token_type_ids(_snake_case , _snake_case ) np.testing.assert_array_equal(_snake_case , _snake_case )
115
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCAmelCase ( _lowercase , unittest.TestCase ): """simple docstring""" pass @nightly @require_onnxruntime @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" A_ = ort.SessionOptions() A_ = False return options def lowerCamelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) A_ = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) A_ = "A red cat sitting on a park bench" A_ = np.random.RandomState(0 ) A_ = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type="np" , ) A_ = output.images A_ = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) A_ = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowerCamelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) A_ = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" ) A_ = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) A_ = "A red cat sitting on a park bench" A_ = np.random.RandomState(0 ) A_ = pipe( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type="np" , ) A_ = output.images A_ = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) A_ = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
115
1
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel UpperCamelCase__ = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ): """simple docstring""" return torch.atana(_UpperCamelCase , _UpperCamelCase ) / math.pi * 2 def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" lowercase_ : str = torch.sin(t * math.pi / 2 ) ** 2 lowercase_ : List[Any] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_UpperCamelCase , _UpperCamelCase ) class _UpperCAmelCase ( snake_case ): pass class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[int] , a : Any ): '''simple docstring''' super().__init__() lowercase_ : Optional[Any] = DiffusionAttnUnetaD(a , n_attn_layers=4 ) lowercase_ : List[Any] = deepcopy(self.diffusion ) lowercase_ : int = torch.quasirandom.SobolEngine(1 , scramble=a ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" lowercase_ : Dict = MODELS_MAP[model_name]["url"] os.system(F"""wget {url} ./""" ) return F"""./{model_name}.ckpt""" UpperCamelCase__ = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } UpperCamelCase__ = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } UpperCamelCase__ = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } UpperCamelCase__ = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } UpperCamelCase__ = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } UpperCamelCase__ = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(F"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" for key, value in ATTN_MAP.items(): if name.startswith(_UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): return name.replace(_UpperCamelCase , _UpperCamelCase ) elif name.startswith(_UpperCamelCase ): return [name.replace(_UpperCamelCase , _UpperCamelCase ) for v in value] raise ValueError(F"""Attn error with {name}""" ) def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=13 ): """simple docstring""" lowercase_ : List[Any] = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) lowercase_ : Optional[int] = 0 if string.startswith("net.3." ): depth += 1 lowercase_ : int = string[6:] elif string.startswith("net." ): lowercase_ : Optional[Any] = string[4:] while string.startswith("main.7." ): depth += 1 lowercase_ : Optional[int] = string[7:] if string.startswith("main." ): lowercase_ : int = string[5:] # mid block if string[:2].isdigit(): lowercase_ : Any = string[:2] lowercase_ : Dict = string[2:] else: lowercase_ : Optional[Any] = string[0] lowercase_ : List[str] = string[1:] if depth == max_depth: lowercase_ : List[str] = MID_NUM_TO_LAYER[layer_num] lowercase_ : Any = "mid_block" elif depth > 0 and int(_UpperCamelCase ) < 7: lowercase_ : Optional[int] = DOWN_NUM_TO_LAYER[layer_num] lowercase_ : List[str] = F"""down_blocks.{depth}""" elif depth > 0 and int(_UpperCamelCase ) > 7: lowercase_ : str = UP_NUM_TO_LAYER[layer_num] lowercase_ : Any = F"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: lowercase_ : int = DEPTH_0_TO_LAYER[layer_num] lowercase_ : Optional[Any] = F"""up_blocks.{max_depth - 1}""" if int(_UpperCamelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" ) lowercase_ : Tuple = string_left[1:] if "resnets" in new_layer: lowercase_ : Tuple = convert_resconv_naming(_UpperCamelCase ) elif "attentions" in new_layer: lowercase_ : Union[str, Any] = convert_attn_naming(_UpperCamelCase ) lowercase_ : Tuple = new_string_left if not isinstance(_UpperCamelCase , _UpperCamelCase ): lowercase_ : Optional[int] = prefix + "." + new_layer + "." + string_left else: lowercase_ : Any = [prefix + "." + new_layer + "." 
+ s for s in string_left] return new_string def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" lowercase_ : Any = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue lowercase_ : List[str] = rename(_UpperCamelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_UpperCamelCase , _UpperCamelCase ): lowercase_ : List[str] = transform_conv_attns(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: lowercase_ : Tuple = v return new_state_dict def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" if len(_UpperCamelCase ) == 1: if len(v.shape ) == 3: # weight lowercase_ : List[Any] = v[:, :, 0] else: # bias lowercase_ : Any = v else: # qkv matrices lowercase_ : str = v.shape[0] lowercase_ : Optional[int] = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: lowercase_ : Any = v[i * single_shape : (i + 1) * single_shape, :, 0] else: lowercase_ : List[str] = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): """simple docstring""" lowercase_ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) lowercase_ : List[Any] = args.model_path.split("/" )[-1].split("." )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" lowercase_ : Optional[Any] = download(_UpperCamelCase ) lowercase_ : Any = MODELS_MAP[model_name]["sample_rate"] lowercase_ : List[str] = MODELS_MAP[model_name]["sample_size"] lowercase_ : str = Object() lowercase_ : List[str] = sample_size lowercase_ : List[Any] = sample_rate lowercase_ : Any = 0 lowercase_ : Any = UNetaDModel(sample_size=_UpperCamelCase , sample_rate=_UpperCamelCase ) lowercase_ : Any = diffusers_model.state_dict() lowercase_ : int = DiffusionUncond(_UpperCamelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_UpperCamelCase )["state_dict"] ) lowercase_ : Dict = orig_model.diffusion_ema.eval() lowercase_ : Dict = orig_model.state_dict() lowercase_ : str = rename_orig_weights(_UpperCamelCase ) lowercase_ : int = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) lowercase_ : int = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_UpperCamelCase ) == 0, F"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith("kernel" ) for k in list(_UpperCamelCase ) ), F"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}""" if key == "time_proj.weight": lowercase_ : int = value.squeeze() lowercase_ : List[Any] = value diffusers_model.load_state_dict(_UpperCamelCase ) lowercase_ : int = 100 lowercase_ : List[Any] = 33 lowercase_ : Union[str, Any] = IPNDMScheduler(num_train_timesteps=_UpperCamelCase ) lowercase_ : List[Any] = torch.manual_seed(_UpperCamelCase ) lowercase_ : Any = torch.randn([1, 2, config.sample_size] , generator=_UpperCamelCase ).to(_UpperCamelCase ) lowercase_ : int = torch.linspace(1 , 0 , steps + 1 , device=_UpperCamelCase )[:-1] lowercase_ : int = get_crash_schedule(_UpperCamelCase ) lowercase_ : str = DanceDiffusionPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase ) lowercase_ : Optional[Any] = torch.manual_seed(33 ) lowercase_ : List[Any] = pipe(num_inference_steps=_UpperCamelCase , generator=_UpperCamelCase ).audios lowercase_ : int = sampling.iplms_sample(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {} ) lowercase_ : Any = generated.clamp(-1 , 1 ) lowercase_ : Tuple = (generated - audio).abs().sum() lowercase_ : Tuple = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , _UpperCamelCase ) print("Diff max" , _UpperCamelCase ) assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/""" print(F"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') UpperCamelCase__ = parser.parse_args() main(args)
640
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
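# A hedged sketch of how the mixin above is consumed: a concrete test case
# provides `self.tool` in setUp and inherits the checks. `load_tool` and the
# "text-classification" tool name mirror the upstream test suite and are
# assumptions here, not part of the file above.
import unittest

from transformers import load_tool


class TextClassificationToolTester(ToolTesterMixin, unittest.TestCase):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()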
640
1