Columns:
    code: string (length 82 to 54.1k)
    code_codestyle: int64 (0 to 699)
    style_context: string (length 111 to 35.6k)
    style_context_codestyle: int64 (0 to 699)
    label: int64 (0 or 1)
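The five columns pair a code sample with a style context and a 0/1 label. A minimal, hypothetical sketch of reading rows with this schema through the `datasets` library follows; the repository id is a placeholder, not the actual source of this dump:

from datasets import load_dataset

# Placeholder repo id; substitute the real dataset name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]  # each row is a dict keyed by the column names above
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code cell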
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 20
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
style_context_codestyle: 694
label: 0
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 21
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
style_context_codestyle: 694
label: 0
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
code_codestyle: 22
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
style_context_codestyle: 694
label: 0
def factorial(num):
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num=100):
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
code_codestyle: 23
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
style_context_codestyle: 694
label: 0
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
code_codestyle: 24
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
style_context_codestyle: 694
label: 0
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
code_codestyle: 25
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
style_context_codestyle: 694
label: 0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int]=13 , __magic_name__ : List[str]=7 , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=False , __magic_name__ : Dict=True , __magic_name__ : Union[str, Any]=99 , __magic_name__ : Optional[Any]=32 , __magic_name__ : Optional[Any]=5 , __magic_name__ : Optional[Any]=4 , __magic_name__ : str=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : str=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : int=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : List[str]=2 , __magic_name__ : Tuple=0.02 , __magic_name__ : Tuple=3 , __magic_name__ : List[str]=4 , __magic_name__ : Any=None , ) -> Dict: """simple docstring""" __snake_case : Tuple = parent __snake_case : Any = batch_size __snake_case : Optional[int] = seq_length __snake_case : int = is_training __snake_case : Union[str, Any] = use_input_mask __snake_case : Any = use_token_type_ids __snake_case : Optional[Any] = use_labels __snake_case : str = vocab_size __snake_case : Any = hidden_size __snake_case : int = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : str = intermediate_size __snake_case : Optional[int] = hidden_act __snake_case : Any = hidden_dropout_prob __snake_case : str = attention_probs_dropout_prob __snake_case : str = max_position_embeddings __snake_case : Optional[Any] = type_vocab_size __snake_case : int = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Optional[Any] = num_labels __snake_case : str = num_choices __snake_case : List[Any] = scope def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : List[str] = None if self.use_input_mask: __snake_case : int = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Dict = None if self.use_token_type_ids: __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : Optional[int] = None __snake_case : str = None __snake_case : Optional[int] = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : int = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : Optional[Any] ) -> Any: """simple docstring""" return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def lowercase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" __snake_case : int = LlamaModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : int = model(__magic_name__ , attention_mask=__magic_name__ ) __snake_case : int = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : List[str] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Tuple , ) -> str: """simple docstring""" __snake_case : Optional[int] = True __snake_case : Optional[int] = LlamaModel(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : str = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , ) __snake_case : List[Any] = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , ) __snake_case : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Any , ) -> str: """simple docstring""" __snake_case : Dict = LlamaForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : str , ) -> Dict: """simple docstring""" __snake_case : int = True __snake_case : Optional[Any] = True __snake_case : Union[str, Any] = LlamaForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # first forward pass __snake_case : List[str] = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , use_cache=__magic_name__ , ) __snake_case : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __snake_case : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case : Any = 
torch.cat([input_mask, next_mask] , dim=-1 ) __snake_case : int = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0] __snake_case : int = model( __magic_name__ , attention_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , output_hidden_states=__magic_name__ , )["""hidden_states"""][0] # select random slice __snake_case : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case : int = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def lowercase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __snake_case : Any = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[str] = config_and_inputs __snake_case : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): lowercase__: int = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () lowercase__: Tuple = (LlamaForCausalLM,) if is_torch_available() else () lowercase__: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) lowercase__: Any = False lowercase__: List[Any] = False def lowercase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __snake_case : int = LlamaModelTester(self ) __snake_case : Dict = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def lowercase__ ( self : str ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self : int ) -> List[Any]: """simple docstring""" __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Union[str, Any] = type self.model_tester.create_and_check_model(*__magic_name__ ) def lowercase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Optional[Any] = 3 __snake_case : Dict = input_dict["""input_ids"""] __snake_case : List[Any] = input_ids.ne(1 ).to(__magic_name__ ) __snake_case : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case : Union[str, Any] = LlamaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : List[str] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , 
(self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase__ ( self : str ) -> str: """simple docstring""" __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[Any] = 3 __snake_case : Optional[int] = """single_label_classification""" __snake_case : int = input_dict["""input_ids"""] __snake_case : Tuple = input_ids.ne(1 ).to(__magic_name__ ) __snake_case : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case : str = LlamaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase__ ( self : Optional[int] ) -> int: """simple docstring""" __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Optional[int] = 3 __snake_case : int = """multi_label_classification""" __snake_case : str = input_dict["""input_ids"""] __snake_case : Any = input_ids.ne(1 ).to(__magic_name__ ) __snake_case : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __snake_case : List[Any] = LlamaForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() __snake_case : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def lowercase__ ( self : str ) -> List[Any]: """simple docstring""" pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def lowercase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Optional[int] = ids_tensor([1, 10] , config.vocab_size ) __snake_case : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __snake_case : Dict = LlamaModel(__magic_name__ ) original_model.to(__magic_name__ ) original_model.eval() __snake_case : Optional[int] = original_model(__magic_name__ ).last_hidden_state __snake_case : Optional[Any] = original_model(__magic_name__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __snake_case : Any = {"""type""": scaling_type, """factor""": 10.0} __snake_case : Optional[Any] = LlamaModel(__magic_name__ ) scaled_model.to(__magic_name__ ) scaled_model.eval() __snake_case : Union[str, Any] = scaled_model(__magic_name__ ).last_hidden_state __snake_case : Union[str, Any] = scaled_model(__magic_name__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def lowercase__ ( self : str ) -> int: """simple docstring""" __snake_case : List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : Any = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __snake_case : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __snake_case : List[str] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case : Union[str, Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __magic_name__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def lowercase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __snake_case : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __snake_case : Tuple = model(torch.tensor(__magic_name__ ) ) # Expected mean on dim = -1 __snake_case : Union[str, Any] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case : Dict = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __magic_name__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def lowercase__ ( self : int ) -> List[str]: """simple docstring""" __snake_case : List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : Optional[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __snake_case : int = model(torch.tensor(__magic_name__ ) ) # Expected mean on dim = -1 __snake_case : List[Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __snake_case : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def lowercase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __snake_case : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] __snake_case : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __snake_case : Optional[int] = model(torch.tensor(__magic_name__ ) ) __snake_case : Any = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __magic_name__ , atol=1E-2 , rtol=1E-2 ) # fmt: off __snake_case : Dict = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __magic_name__ , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is curently gated""" ) @slow def lowercase__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" __snake_case : Tuple = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __snake_case : Union[str, Any] = """Simply put, the theory of relativity states that """ __snake_case : str = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , return_tensors="""pt""" ) __snake_case : Union[str, Any] = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__magic_name__ ) # greedy generation outputs __snake_case : List[str] = model.generate(__magic_name__ , max_new_tokens=64 , top_p=__magic_name__ , temperature=1 , do_sample=__magic_name__ ) __snake_case : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ )
26
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
694
0
# NOTE: function names restored from the call sites; the flattened source gave every
# def the same placeholder name, which shadowed earlier definitions and broke the
# internal calls to generate_pascal_triangle / populate_current_row / benchmark.
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
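A short hand-checked usage sketch for the snippet above; it assumes the function names reconstructed there are in scope:

# Both generators should agree on the first five rows of Pascal's triangle.
expected = [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
assert generate_pascal_triangle(5) == expected
assert generate_pascal_triangle_optimized(5) == expected
print_pascal_triangle(5)  # prints the centered triangle for 5 rows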
27
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
0
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable


# NOTE: the flattened source named the function and all parameters with one placeholder;
# the body's use of `fn` pins the parameter name, and `experimental` is a reconstruction
# inferred from the warning message.
def experimental(fn: Callable):
    """simple docstring"""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,  # reconstructed: the flattened source passed an undefined placeholder here
        )
        return fn(*args, **kwargs)

    return _inner_fn
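A small demonstration of the decorator above; `fancy_feature` is a hypothetical function invented for this sketch:

import warnings

@experimental  # the decorator reconstructed above
def fancy_feature(x: int) -> int:  # hypothetical example function
    return x * 2

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert fancy_feature(3) == 6                     # the wrapped function still runs
    assert "experimental" in str(caught[0].message)  # ...but emits a UserWarning first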
28
'''simple docstring'''


# NOTE: the flattened source gave all five functions (and both parameters of each)
# identical placeholder names, which is a SyntaxError; the bodies pin `number` and
# `position`, and the function names below are reconstructed from what each does.
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
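The single-bit operations above are easiest to read against a worked value; the results below were checked by hand (names as reconstructed):

# 0b1010 == 10
assert set_bit(0b1010, 0) == 0b1011    # 10 | 0b0001 == 11
assert clear_bit(0b1010, 1) == 0b1000  # 10 & ~0b0010 == 8
assert flip_bit(0b1010, 3) == 0b0010   # 10 ^ 0b1000 == 2
assert is_bit_set(0b1010, 3) is True   # bit 3 of 0b1010 is 1
assert get_bit(0b1010, 0) == 0         # bit 0 of 0b1010 is 0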
694
0
"""simple docstring""" import random from typing import Any def lowercase ( lowerCAmelCase__ ): for _ in range(len(lowerCAmelCase__ ) ): lowerCamelCase_ = random.randint(0 ,len(lowerCAmelCase__ ) - 1 ) lowerCamelCase_ = random.randint(0 ,len(lowerCAmelCase__ ) - 1 ) lowerCamelCase_ , lowerCamelCase_ = data[b], data[a] return data if __name__ == "__main__": A_ = [0, 1, 2, 3, 4, 5, 6, 7] A_ = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
29
'''simple docstring'''


# NOTE: `_validate_point`, `point`, and `item` are pinned by the original call sites;
# the two distance-function names are reconstructions of the obvious naming for the
# Manhattan distance (sum of absolute coordinate differences).
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
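A few hand-checked calls for the distance helpers above (names as reconstructed):

assert manhattan_distance([1, 1], [2, 2]) == 2.0               # |1-2| + |1-2|
assert manhattan_distance_one_liner([1.5, 1.5], [2, 2]) == 1.0 # 0.5 + 0.5
try:
    manhattan_distance([1, 1], [2, 2, 2])
except ValueError as err:
    print(err)  # Both points must be in the same n-dimensional space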
694
0
import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __a = logging.get_logger('transformers.models.encodec') __a = { 'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited', 'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size', 'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed', 'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg', } __a = { 'encoder.model.0.conv.conv': 'encoder.layers.0.conv', 'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv', 'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv', 'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv', 'encoder.model.3.conv.conv': 'encoder.layers.3.conv', 'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv', 'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv', 'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv', 'encoder.model.6.conv.conv': 'encoder.layers.6.conv', 'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv', 'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv', 'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv', 'encoder.model.9.conv.conv': 'encoder.layers.9.conv', 'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv', 'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv', 'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv', 'encoder.model.12.conv.conv': 'encoder.layers.12.conv', 'encoder.model.13.lstm': 'encoder.layers.13.lstm', 'encoder.model.15.conv.conv': 'encoder.layers.15.conv', } __a = { 'encoder.model.0.conv.norm': 'encoder.layers.0.norm', 'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm', 'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm', 'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm', 'encoder.model.3.conv.norm': 'encoder.layers.3.norm', 'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm', 'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm', 'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm', 'encoder.model.6.conv.norm': 'encoder.layers.6.norm', 'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm', 'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm', 'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm', 'encoder.model.9.conv.norm': 'encoder.layers.9.norm', 'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm', 'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm', 'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm', 'encoder.model.12.conv.norm': 'encoder.layers.12.norm', 'encoder.model.15.conv.norm': 'encoder.layers.15.norm', } __a = { 'decoder.model.0.conv.conv': 'decoder.layers.0.conv', 'decoder.model.1.lstm': 'decoder.layers.1.lstm', 'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv', 'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv', 'decoder.model.4.block.3.conv.conv': 
'decoder.layers.4.block.3.conv', 'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv', 'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv', 'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv', 'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv', 'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv', 'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv', 'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv', 'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv', 'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv', 'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv', 'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv', 'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv', 'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv', 'decoder.model.15.conv.conv': 'decoder.layers.15.conv', } __a = { 'decoder.model.0.conv.norm': 'decoder.layers.0.norm', 'decoder.model.3.convtr.norm': 'decoder.layers.3.norm', 'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm', 'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm', 'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm', 'decoder.model.6.convtr.norm': 'decoder.layers.6.norm', 'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm', 'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm', 'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm', 'decoder.model.9.convtr.norm': 'decoder.layers.9.norm', 'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm', 'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm', 'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm', 'decoder.model.12.convtr.norm': 'decoder.layers.12.norm', 'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm', 'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm', 'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm', 'decoder.model.15.conv.norm': 'decoder.layers.15.norm', } __a = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __a = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __a = [] __a = [] def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' for attribute in key.split('''.''' ): UpperCAmelCase_ : Optional[Any] = getattr(_lowercase , _lowercase ) if weight_type is not None: UpperCAmelCase_ : List[Any] = getattr(_lowercase , _lowercase ).shape else: UpperCAmelCase_ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase_ : Tuple = value elif weight_type == "weight_g": UpperCAmelCase_ : Any = value elif weight_type == "weight_v": UpperCAmelCase_ : List[Any] = value elif weight_type == "bias": UpperCAmelCase_ : str = value elif weight_type == "running_mean": UpperCAmelCase_ : Union[str, Any] = value elif weight_type == "running_var": UpperCAmelCase_ : str = value elif weight_type == "num_batches_tracked": UpperCAmelCase_ : List[Any] = value elif weight_type == "weight_ih_l0": UpperCAmelCase_ : str = value elif weight_type == "weight_hh_l0": UpperCAmelCase_ : str = value elif weight_type == "bias_ih_l0": UpperCAmelCase_ : Optional[Any] = value elif weight_type == "bias_hh_l0": UpperCAmelCase_ : int = value elif weight_type == "weight_ih_l1": UpperCAmelCase_ : List[Any] = value elif weight_type == "weight_hh_l1": UpperCAmelCase_ : int = value elif weight_type == "bias_ih_l1": UpperCAmelCase_ : int = value elif weight_type == "bias_hh_l1": UpperCAmelCase_ : List[str] = value else: UpperCAmelCase_ : int = value logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCAmelCase_, UpperCAmelCase_ : Dict = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Tuple = [] if model_name == "encodec_24khz" or "encodec_32khz": UpperCAmelCase_ : Tuple = MAPPING_24K elif model_name == "encodec_48khz": UpperCAmelCase_ : str = MAPPING_48K else: raise ValueError(f'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(_lowercase , _lowercase ): logger.info(f'''{name} was ignored''' ) continue UpperCAmelCase_ : int = False for key, mapped_key in MAPPING.items(): if "*" in key: UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = key.split('''.*.''' ) if prefix in name and suffix in name: UpperCAmelCase_ : Optional[Any] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue UpperCAmelCase_ : int = True if "*" in mapped_key: UpperCAmelCase_ : Optional[int] = name.split(_lowercase )[0].split('''.''' )[-2] UpperCAmelCase_ : Optional[int] = mapped_key.replace('''*''' , _lowercase ) if "weight_g" in name: UpperCAmelCase_ : List[Any] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase_ : Tuple = '''weight_v''' elif "weight_ih_l0" in name: UpperCAmelCase_ : Optional[int] = '''weight_ih_l0''' elif "weight_hh_l0" in name: UpperCAmelCase_ : Optional[int] = '''weight_hh_l0''' elif "bias_ih_l0" in name: UpperCAmelCase_ : Dict = '''bias_ih_l0''' elif "bias_hh_l0" in name: UpperCAmelCase_ : Any = '''bias_hh_l0''' elif "weight_ih_l1" in name: UpperCAmelCase_ : int = '''weight_ih_l1''' elif "weight_hh_l1" in name: UpperCAmelCase_ : int = '''weight_hh_l1''' elif "bias_ih_l1" in name: UpperCAmelCase_ : Dict = '''bias_ih_l1''' elif "bias_hh_l1" in name: UpperCAmelCase_ : Union[str, Any] = '''bias_hh_l1''' elif "bias" in name: UpperCAmelCase_ : List[str] = '''bias''' elif "weight" in name: UpperCAmelCase_ : int = '''weight''' 
elif "running_mean" in name: UpperCAmelCase_ : Tuple = '''running_mean''' elif "running_var" in name: UpperCAmelCase_ : Dict = '''running_var''' elif "num_batches_tracked" in name: UpperCAmelCase_ : Tuple = '''num_batches_tracked''' else: UpperCAmelCase_ : Dict = None set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) continue if not is_used: unused_weights.append(_lowercase ) logger.warning(f'''Unused weights: {unused_weights}''' ) @torch.no_grad() def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , ): '''simple docstring''' if config_path is not None: UpperCAmelCase_ : int = EncodecConfig.from_pretrained(_lowercase ) else: UpperCAmelCase_ : List[str] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": UpperCAmelCase_ : Union[str, Any] = [8, 5, 4, 4] UpperCAmelCase_ : str = [2.2] UpperCAmelCase_ : Optional[Any] = 64 UpperCAmelCase_ : str = 32000 UpperCAmelCase_ : Tuple = 2048 UpperCAmelCase_ : str = False UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : int = False elif model_name == "encodec_48khz": UpperCAmelCase_ : List[str] = [8, 5, 4, 2] UpperCAmelCase_ : Optional[int] = [3.0, 6.0, 12.0, 24.0] UpperCAmelCase_ : Optional[int] = 48000 UpperCAmelCase_ : List[str] = 2 UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : Any = '''time_group_norm''' UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : Union[str, Any] = 1.0 UpperCAmelCase_ : List[Any] = 0.01 else: raise ValueError(f'''Unknown model name: {model_name}''' ) UpperCAmelCase_ : int = EncodecModel(_lowercase ) UpperCAmelCase_ : List[Any] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(_lowercase ) UpperCAmelCase_ : int = torch.load(_lowercase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights UpperCAmelCase_ : Optional[int] = original_checkpoint['''best_state'''] recursively_load_weights(_lowercase , _lowercase , _lowercase ) model.save_pretrained(_lowercase ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(_lowercase ) model.push_to_hub(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--model', default='encodec_24khz', type=str, help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) __a = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
30
'''simple docstring'''
from __future__ import annotations


# NOTE: attribute names are pinned by the later `self.order` / `self.a_coeffs` /
# history references; the class name `IIRFilter` is a reconstruction. The flattened
# source assigned every attribute to the same throwaway local, so the filter kept
# no state at all.
class IIRFilter:
    def __init__(self, order: int) -> None:
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """simple docstring"""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
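A minimal sketch of driving the filter above; the coefficients are chosen by hand for illustration, not a designed filter:

# With the default coefficients (a = b = [1, 0, ...]) the filter passes samples through.
filt = IIRFilter(2)
assert [filt.process(x) for x in (1.0, 0.5, 0.0)] == [1.0, 0.5, 0.0]

# A one-pole smoother: y[n] = 0.5 * x[n] + 0.5 * y[n-1]
lp = IIRFilter(1)
lp.set_coefficients([1.0, -0.5], [0.5, 0.0])
print([round(lp.process(1.0), 4) for _ in range(4)])  # [0.5, 0.75, 0.875, 0.9375]: step response creeping toward 1.0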
694
0
# NOTE: identifiers restored from the call sites (`decrypt` is pinned by the
# brute-force helper below); `encrypt` and `bruteforce` are reconstructions from
# what the code does. The flattened source reused one name for all three defs.
def encrypt(input_string: str, key: int) -> str:
    temp_grid = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
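A round-trip check for the rail-fence functions above, using the textbook three-rail example; the ciphertext was verified by hand against the zigzag layout:

message = "WEAREDISCOVEREDFLEEATONCE"
cipher = encrypt(message, 3)
assert cipher == "WECRLTEERDSOEEFEAOCAIVDEN"  # classic 3-rail fence result
assert decrypt(cipher, 3) == message
assert bruteforce(cipher)[3] == message       # exhaustive key search recovers it at key 3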
31
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : dict ,_UpperCAmelCase : str ,_UpperCAmelCase : set ,_UpperCAmelCase : set ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ,_UpperCAmelCase : PriorityQueue ,_UpperCAmelCase : dict ,_UpperCAmelCase : float | int ,) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue _a : Dict =cst_fwd.get(_UpperCAmelCase ,np.inf ) _a : int =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _a : Tuple =new_cost_f _a : Optional[Any] =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _a : str =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ) -> int: _a : Optional[Any] =-1 _a : List[str] =set() _a : Optional[int] =set() _a : Optional[int] ={source: 0} _a : List[str] ={destination: 0} _a : Union[str, Any] ={source: None} _a : Dict ={destination: None} _a : PriorityQueue[Any] =PriorityQueue() _a : PriorityQueue[Any] =PriorityQueue() _a : Optional[int] =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _a , _a : str =queue_forward.get() visited_forward.add(_UpperCAmelCase ) _a , _a : List[Any] =queue_backward.get() visited_backward.add(_UpperCAmelCase ) _a : int =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) _a : Any =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _a : Any =shortest_distance return shortest_path_distance A__: Union[str, Any] = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } A__: str = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
694
0
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = downstream_dict['''projector.weight'''] _UpperCAmelCase = downstream_dict['''projector.bias'''] _UpperCAmelCase = downstream_dict['''model.post_net.linear.weight'''] _UpperCAmelCase = downstream_dict['''model.post_net.linear.bias'''] return model def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = downstream_dict['''model.linear.weight'''] _UpperCAmelCase = downstream_dict['''model.linear.bias'''] return model def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int: """simple docstring""" _UpperCAmelCase = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = downstream_dict['''connector.weight'''] _UpperCAmelCase = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _UpperCAmelCase = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] _UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] _UpperCAmelCase = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] _UpperCAmelCase = downstream_dict['''objective.W'''] return model @torch.no_grad() def A__ ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' ) _UpperCAmelCase = checkpoint['''Downstream'''] _UpperCAmelCase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): _UpperCAmelCase = convert_classification(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif arch.endswith('''ForAudioFrameClassification''' ): _UpperCAmelCase = convert_diarization(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif arch.endswith('''ForXVector''' ): _UpperCAmelCase = convert_xvector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: _UpperCAmelCase = 
checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") UpperCAmelCase_ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
32
'''simple docstring'''
from math import factorial


def solution(num: int = 100) -> int:
    """simple docstring"""
    # `int` reconstructed as the mapped function: sum the decimal digits of num!
    # (the flattened source mapped an undefined placeholder over the digits).
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
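Two hand-checked values for `solution` above; the second is the well-known Project Euler 20 answer:

assert solution(10) == 27  # 10! == 3628800 and 3+6+2+8+8+0+0 == 27
assert solution() == 648   # digit sum of 100!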
694
0
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Optional[int] = ['image_processor', 'tokenizer'] __lowercase : Optional[int] = 'BridgeTowerImageProcessor' __lowercase : int = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self:Union[str, Any] , _a:Union[str, Any] , _a:Any ): super().__init__(_a , _a ) def __call__( self:str , _a:Tuple , _a:Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _a:bool = True , _a:Union[bool, str, PaddingStrategy] = False , _a:Union[bool, str, TruncationStrategy] = None , _a:Optional[int] = None , _a:int = 0 , _a:Optional[int] = None , _a:Optional[bool] = None , _a:Optional[bool] = None , _a:bool = False , _a:bool = False , _a:bool = False , _a:bool = False , _a:bool = True , _a:Optional[Union[str, TensorType]] = None , **_a:Dict , ): snake_case__ = self.tokenizer( text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel_values + pixel_mask snake_case__ = self.image_processor( _a , return_tensors=_a , do_normalize=_a , do_center_crop=_a , **_a ) encoding.update(_a ) return encoding def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Optional[int] , **_a:Optional[Any] ): return self.tokenizer.batch_decode(*_a , **_a ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , *_a:Tuple , **_a:List[str] ): return self.tokenizer.decode(*_a , **_a ) @property def SCREAMING_SNAKE_CASE__ ( self:int ): snake_case__ = self.tokenizer.model_input_names snake_case__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
33
'''simple docstring'''


# NOTE: `s`, `pattern`, `pat_len`, and `position` are pinned by the body, and the
# function name by the assertions below; the flattened source gave both parameters
# the same placeholder name. This is the naive O(len(s) * len(pattern)) search.
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
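A worked example for the search above, with offsets verified by hand:

text = "ABAAABCDBBABCDDEBCABC"
assert naive_pattern_search(text, "ABC") == [4, 10, 18]
assert naive_pattern_search(text, "XYZ") == []  # no occurrence -> empty list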
694
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class snake_case_ ( lowerCamelCase_ ): """simple docstring""" A_ = '''falcon''' A_ = ['''past_key_values'''] def __init__( self , lowerCamelCase_=6_5_0_2_4 , lowerCamelCase_=4_5_4_4 , lowerCamelCase_=3_2 , lowerCamelCase_=7_1 , lowerCamelCase_=1e-5 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=1_1 , lowerCamelCase_=1_1 , **lowerCamelCase_ , ) -> Any: UpperCamelCase = vocab_size # Backward compatibility with n_embed kwarg UpperCamelCase = kwargs.pop('''n_embed''' , lowerCamelCase_) UpperCamelCase = hidden_size if n_embed is None else n_embed UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = layer_norm_epsilon UpperCamelCase = initializer_range UpperCamelCase = use_cache UpperCamelCase = hidden_dropout UpperCamelCase = attention_dropout UpperCamelCase = bos_token_id UpperCamelCase = eos_token_id UpperCamelCase = num_attention_heads if num_kv_heads is None else num_kv_heads UpperCamelCase = alibi UpperCamelCase = new_decoder_architecture UpperCamelCase = multi_query # Ignored when new_decoder_architecture is True UpperCamelCase = parallel_attn UpperCamelCase = bias super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_) @property def UpperCAmelCase__ ( self) -> int: return self.hidden_size // self.num_attention_heads @property def UpperCAmelCase__ ( self) -> str: return not self.alibi
34
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
0
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ :Union[str, Any] = 16 a_ :Any = 32 def a ( A__ , A__ , A__ , A__ , A__ = 1_6 ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) SCREAMING_SNAKE_CASE__ : Any = DatasetDict( { '''train''': dataset['''train'''].select(A__ ), '''validation''': dataset['''train'''].select(A__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(A__ ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE__ : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A__ , max_length=A__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE__ : List[str] = datasets.map( A__ , batched=A__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE__ : Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(A__ ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE__ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_6 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE__ : Tuple = 8 else: SCREAMING_SNAKE_CASE__ : int = None return tokenizer.pad( A__ , padding='''longest''' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader( tokenized_datasets['''train'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataLoader( tokenized_datasets['''validation'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) SCREAMING_SNAKE_CASE__ : Tuple = DataLoader( tokenized_datasets['''test'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) return train_dataloader, eval_dataloader, test_dataloader def a ( A__ , A__ ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Download the dataset SCREAMING_SNAKE_CASE__ : List[Any] = load_dataset('''glue''' , '''mrpc''' ) # Create our splits SCREAMING_SNAKE_CASE__ : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator SCREAMING_SNAKE_CASE__ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE__ : Union[str, Any] = config['''lr'''] SCREAMING_SNAKE_CASE__ : List[Any] = int(config['''num_epochs'''] ) SCREAMING_SNAKE_CASE__ : List[Any] = int(config['''seed'''] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config['''batch_size'''] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation SCREAMING_SNAKE_CASE__ : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE SCREAMING_SNAKE_CASE__ : List[Any] = MAX_GPU_BATCH_SIZE set_seed(A__ ) # New Code # # Create our folds: SCREAMING_SNAKE_CASE__ : Optional[Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) SCREAMING_SNAKE_CASE__ : List[str] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(A__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = get_fold_dataloaders( A__ , A__ , A__ , A__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE__ : Optional[int] = AdamW(params=model.parameters() , lr=A__ ) # Instantiate scheduler SCREAMING_SNAKE_CASE__ : Optional[Any] = get_linear_schedule_with_warmup( optimizer=A__ , num_warmup_steps=1_0_0 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # Now we train the model for epoch in range(A__ ): model.train() for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(**A__ ) SCREAMING_SNAKE_CASE__ : Tuple = outputs.loss SCREAMING_SNAKE_CASE__ : List[str] = loss / gradient_accumulation_steps accelerator.backward(A__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[str] = model(**A__ ) SCREAMING_SNAKE_CASE__ : Dict = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=A__ , references=A__ , ) SCREAMING_SNAKE_CASE__ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , A__ ) # New Code # # We also run predictions on the test set at the very end SCREAMING_SNAKE_CASE__ : List[str] = [] for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any = model(**A__ ) SCREAMING_SNAKE_CASE__ : List[str] = outputs.logits SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(A__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat(A__ , dim=0 ) SCREAMING_SNAKE_CASE__ : List[str] = torch.stack(A__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) SCREAMING_SNAKE_CASE__ : List[str] = metric.compute(predictions=A__ , references=A__ ) accelerator.print('''Average test metrics from all folds:''' , A__ ) def a ( ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=A__ , default=A__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=A__ , default=3 , help='''The number of splits to perform across the dataset''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(A__ , A__ ) if __name__ == "__main__": main()
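The script above builds its folds with scikit-learn's StratifiedKFold. A minimal, self-contained sketch of that split follows; the toy labels are my own example, not data from the script.

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1])
kfold = StratifiedKFold(n_splits=3)
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    # Each fold holds out one sample of each class, preserving class balance.
    print(fold, train_idxs, valid_idxs)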
35
"""Subset-sum check via bottom-up dynamic programming."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    # subset[i][j] is True when a subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
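A space-optimized variant of the same recurrence, added as a sketch of my own (the one-dimensional helper is not part of the original file): a single boolean row suffices if sums are updated from high to low, so each element is used at most once.

def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
    dp = [False] * (required_sum + 1)
    dp[0] = True  # the empty subset always sums to zero
    for value in arr:
        # Iterate downwards so dp[j - value] still refers to the previous row.
        for j in range(required_sum, value - 1, -1):
            dp[j] = dp[j] or dp[j - value]
    return dp[required_sum]

assert is_sum_subset_1d([2, 4, 6, 8], 14) is True
assert is_sum_subset_1d([2, 4, 6, 8], 5) is False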
694
0
from __future__ import annotations

from math import gcd


def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial divisor of ``num`` using Pollard's rho, or None on failure."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a pseudorandom function with values in
    # 0 <= X < ``num``. Pollard suggested ``f(x) = (x**2 - 1) % num``, but the
    # algorithm's success depends in part on the seed and the chosen function.
    # To make retries easier, we use ``f(x) = (x**2 + C) % num`` where ``C``
    # is a value that we can modify between attempts.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both enter a cycle whose length ``p`` divides ``num``.
            # Once that happens, the position difference between tortoise and
            # hare shares a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            if divisor == num:
                # Unfortunately, the divisor is ``num`` itself and is useless.
                break
            # The divisor is a nontrivial factor of ``num``!
            return divisor

        # This attempt failed. Reseed from the hare's position (as Richard
        # Brent's "optimized" variant does) and increment the step constant so
        # the next attempt quickly diverges from this one.
        seed = hare
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
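A small driver of my own that applies pollard_rho recursively to produce a full factorization; the naive trial-division primality test keeps the sketch self-contained and is an assumption, not part of the original script.

def is_probably_prime(n: int) -> bool:
    # Naive trial division; fine for small illustrative inputs.
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

def factorize(n: int) -> list[int]:
    if n == 1:
        return []
    if is_probably_prime(n):
        return [n]
    divisor = pollard_rho(n)
    if divisor is None:  # all attempts failed; report n as-is
        return [n]
    return sorted(factorize(divisor) + factorize(n // divisor))

print(factorize(2 * 3 * 5 * 7 * 11))  # [2, 3, 5, 7, 11]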
36
"""Project Euler 2: sum of the even-valued Fibonacci terms not exceeding four million."""


def solution(limit: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed ``limit``."""
    even_fibs = []
    a, b = 0, 1
    while b <= limit:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
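An equivalent constant-space sketch of mine: even-valued Fibonacci numbers satisfy E(k) = 4*E(k-1) + E(k-2) with E(1) = 2 and E(2) = 8, so the odd terms never need to be generated at all.

def solution_even_only(limit: int = 4_000_000) -> int:
    prev, curr = 2, 8
    total = 2 if limit >= 2 else 0
    while curr <= limit:
        total += curr
        # Advance along the even-valued subsequence directly.
        prev, curr = curr, 4 * curr + prev
    return total

assert solution_even_only() == solution()  # both give 4613732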
694
0
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
37
"""Project Euler 25: index of the first Fibonacci term with ``n`` digits."""


def fibonacci(n: int) -> int:
    """Return the n-th term of the Fibonacci sequence used by this solution."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci term with ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
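The loop above recomputes the entire Fibonacci sequence on every iteration, which is quadratic overall. A linear incremental sketch of mine (the function name is invented; indexing follows the usual 1-based convention, which can differ by one from the helper above):

def fibonacci_digits_index_fast(n: int) -> int:
    a, b = 1, 1  # F(1), F(2) in the usual 1-indexed convention
    index = 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert fibonacci_digits_index_fast(3) == 12  # F(12) = 144 is the first 3-digit term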
694
0
"""Generate all k-element combinations of the numbers 1..n via backtracking."""

from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # When the requested depth is reached, record a copy of the current state.
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
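A quick cross-check of my own: the recursive generator emits combinations in the same order as itertools.combinations over 1..n.

from itertools import combinations

n, k = 4, 2
assert generate_all_combinations(n, k) == [list(c) for c in combinations(range(1, n + 1), k)]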
38
"""Convert a RemBERT TensorFlow checkpoint to a PyTorch model."""

import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
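An example invocation; the paths below are placeholders of mine, not real checkpoints.

# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin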
694
0
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
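An illustration of mine of what the registered flag enables inside a docstring: with the directive set, doctest accepts any output for that line.

def example():
    """
    >>> import random
    >>> random.random()  # doctest: +IGNORE_RESULT
    """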
39
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
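A minimal sketch of mine of the id-mapping scheme the tokenizer above implements: kept tokens get contiguous ids and everything else falls back to <unk>. The toy vocabulary is invented for illustration, not taken from dict.txt.

vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "▁xin": 4, "chào": 5}

def convert_tokens_to_ids(tokens: list[str]) -> list[int]:
    # Unknown tokens map to the <unk> id, mirroring the fairseq fallback above.
    return [vocab.get(token, vocab["<unk>"]) for token in tokens]

print(convert_tokens_to_ids(["▁xin", "chào", "▁unseen"]))  # [4, 5, 3]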
694
0
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed  # Place the cars
        i += randint(1, max_speed * 2) if random_frequency else frequency  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
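An example run of my own: a 30-cell circular road, one car every 5 cells starting at speed 2, simulated for 3 steps with braking probability 0.1. Empty cells print as dots, occupied cells as their speed.

highway = construct_highway(30, frequency=5, initial_speed=2)
history = simulate(highway, number_of_update=3, probability=0.1, max_speed=5)
for state in history:
    print("".join("." if cell == -1 else str(cell) for cell in state))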
40
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
694
0
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _A ( A__ ): """simple docstring""" if isinstance(A__ , collections.abc.Iterable ): return x return (x, x) @require_tf class lowercase_ : """simple docstring""" def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ): pass def SCREAMING_SNAKE_CASE ( self : str ): pass def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): pass def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Dict ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : str ,lowercase__ : Optional[int]=None ,**lowercase__ : str ): __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase__ ,lowercase__ ) __lowercase = TFVisionTextDualEncoderModel(lowercase__ ) __lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ) self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : Optional[int]=None ,**lowercase__ : Any ): __lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ ) __lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ ) __lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ) self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : int ,lowercase__ : Optional[Any] ,lowercase__ : List[str]=None ,**lowercase__ : Any ): __lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ ) __lowercase = {'''vision_model''': vision_model, '''text_model''': text_model} __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase__ ) __lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ) self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Any=None ,**lowercase__ : Dict ): 
__lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ ) __lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ ) __lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ) __lowercase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase__ ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(lowercase__ ) __lowercase = model(input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ) __lowercase = after_output[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase__ ,1e-5 ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any]=None ,**lowercase__ : List[Any] ): __lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ ) __lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ ) __lowercase = model( input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ,output_attentions=lowercase__ ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(lowercase__ ) ,vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(lowercase__ ) ,text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : np.ndarray ,lowercase__ : float ): __lowercase = np.abs((a - b) ).max() self.assertLessEqual(lowercase__ ,lowercase__ ,F"Difference between torch and flax is {diff} (>= {tol})." 
) def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = self.prepare_config_and_inputs() self.check_save_load(**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase__ ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase , __lowercase = self.get_pretrained_model_and_inputs() __lowercase = model_a(**lowercase__ ) __lowercase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase__ ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(lowercase__ ) __lowercase = model_a(**lowercase__ ) __lowercase = after_outputs[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase__ ,1e-5 ) @require_tf class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : Tuple ): __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-random-bert''' ) __lowercase = 1_3 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ): __lowercase = TFViTModel(lowercase__ ,name='''vision_model''' ) __lowercase = TFBertModel(lowercase__ ,name='''text_model''' ) return vision_model, text_model def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = TFViTModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : Any ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-deit-tf''' ,'''hf-internal-testing/tiny-random-roberta''' ) __lowercase = 1_3 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Optional[int] ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : str ,lowercase__ : Optional[Any]=None ,**lowercase__ : str ): __lowercase , __lowercase = self.get_vision_text_model(lowercase__ ,lowercase__ ) __lowercase = TFVisionTextDualEncoderModel(vision_model=lowercase__ ,text_model=lowercase__ ) __lowercase = model( input_ids=lowercase__ ,pixel_values=lowercase__ ,attention_mask=lowercase__ ,output_attentions=lowercase__ ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(lowercase__ ) ,vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(lowercase__ ) ,text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Dict ,lowercase__ : Tuple ): __lowercase = TFDeiTModel(lowercase__ ,name='''vision_model''' ) __lowercase = TFRobertaModel(lowercase__ ,name='''text_model''' ) return vision_model, text_model def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = TFDeiTModelTester(self ) __lowercase = TFRobertaModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-clip-tf''' ,'''hf-internal-testing/tiny-random-bert''' ) __lowercase = 1_3 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = 
{'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ): __lowercase = TFCLIPVisionModel(lowercase__ ,name='''vision_model''' ) __lowercase = TFBertModel(lowercase__ ,name='''text_model''' ) return vision_model, text_model def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase = TFCLIPVisionModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = clip_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class lowercase_ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = TFVisionTextDualEncoderModel.from_pretrained( '''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 ,from_pt=lowercase__ ) __lowercase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) __lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) __lowercase = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=lowercase__ ,padding=lowercase__ ,return_tensors='''np''' ) __lowercase = model(**lowercase__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,) __lowercase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() ,lowercase__ ,atol=1e-3 ) )
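A numpy sketch of mine of the quantity whose shapes these tests assert: CLIP-style logits form a scaled similarity matrix between normalized image and text embeddings. Names and values are illustrative, not the model's code.

import numpy as np

def clip_logits(image_embeds: np.ndarray, text_embeds: np.ndarray, logit_scale: float = 1.0) -> np.ndarray:
    # L2-normalize both embedding sets, then take scaled dot products.
    image_embeds = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds = text_embeds / np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    return logit_scale * image_embeds @ text_embeds.T  # (num_images, num_texts)

logits_per_image = clip_logits(np.random.randn(2, 8), np.random.randn(3, 8))
print(logits_per_image.shape)  # (2, 3)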
41
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(next_iteration)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers or floats")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # If any row contains a zero, move one equation with no zero terms to the front
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
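# A minimal sanity check for the elimination routine above; the 2x2 system and
# its expected roots are illustrative values, not part of the original file.
# Each row encodes a*x + b*y = c as [a, b, c].
if __name__ == "__main__":
    system = [
        [2, 1, 5],  # 2x + y = 5
        [1, -1, 1],  # x - y = 1
    ]
    print(solve_simultaneous(system))  # expected: [2.0, 1.0]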
694
0
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    # A 4-digit base concatenated with its 5-digit double is base * 10**5 + 2 * base.
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # A 3-digit base concatenated with its double and triple is base * 1002003.
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
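# A quick check of the multiplier trick used above. The base value below is
# illustrative (it is the well-known answer to this Project Euler problem):
# concatenating a 4-digit n with its 5-digit double equals n * 10**5 + 2 * n.
if __name__ == "__main__":
    n = 9327
    assert int(str(n) + str(2 * n)) == 100002 * n == 932718654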
42
694
0
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : @staticmethod def lowerCamelCase_ ( *UpperCamelCase_: Any , **UpperCamelCase_: Union[str, Any] ) -> int: """simple docstring""" pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): @require_torch def lowerCamelCase_ ( self: int ) -> Dict: """simple docstring""" lowercase__ = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCamelCase_ ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowercase__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], ] , ) @require_tf def lowerCamelCase_ ( self: int ) -> Optional[Any]: """simple docstring""" lowercase__ = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) lowercase__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ 
{'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], [ {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, {'''score''': 0.333, '''label''': ANY(UpperCamelCase_ )}, ], ] , ) @slow @require_torch def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]: """simple docstring""" lowercase__ = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowercase__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def lowerCamelCase_ ( self: Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowercase__ = image_classifier(UpperCamelCase_ , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowercase__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
43
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
694
0
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc on [x_start, x_end] using `steps` chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
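# The chord-sum approximation is exact for straight lines, which gives a cheap
# sanity check (the interval and step count below are illustrative values):
# f(x) = x from 0 to 1 has length sqrt(2), and chords reproduce it exactly.
if __name__ == "__main__":
    assert math.isclose(line_length(lambda x: x, 0, 1, 10), math.sqrt(2))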
44
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
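# Hand check of the push-relabel example above (an assumption read off the
# graph literal, not part of the original file): the only path from source 0
# to sink 3 is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the printed
# maximum flow should be min(7, 6, 8) == 6. The edge 3 -> 0 with capacity 9
# points back toward the source and cannot carry additional flow.
assert min(7, 6, 8) == 6  # capacities along 0 -> 1 -> 2 -> 3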
694
0
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } UpperCamelCase = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } UpperCamelCase = {"facebook/blenderbot_small-90M": 512} def A ( lowercase__ : Optional[int] ) -> List[Any]: UpperCamelCase__ :Union[str, Any] = set() UpperCamelCase__ :List[str] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase__ :List[Any] = char UpperCamelCase__ :Any = set(lowercase__ ) return pairs class lowerCAmelCase_ ( lowercase ): """simple docstring""" _snake_case : str = VOCAB_FILES_NAMES _snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : str = ["""input_ids""", """attention_mask"""] def __init__( self :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str , lowerCamelCase__ :Tuple="__start__" , lowerCamelCase__ :Union[str, Any]="__end__" , lowerCamelCase__ :Dict="__unk__" , lowerCamelCase__ :List[Any]="__null__" , **lowerCamelCase__ :str , ): super().__init__(unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , **lowerCamelCase__ ) with open(lowerCamelCase__ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase__ :Optional[int] = json.load(lowerCamelCase__ ) UpperCamelCase__ :List[str] = {v: k for k, v in self.encoder.items()} with open(lowerCamelCase__ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase__ :Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase__ :List[Any] = [tuple(merge.split() ) for merge in merges] UpperCamelCase__ :List[str] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) UpperCamelCase__ :Union[str, Any] = {} @property def __a ( self :Dict ): return len(self.encoder ) def __a ( self :Optional[int] ): return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self :Union[str, Any] , lowerCamelCase__ :str ): if token in self.cache: return self.cache[token] UpperCamelCase__ :List[str] = re.sub("""([.,!?()])""" , r""" \1""" , lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = re.sub("""(')""" , r""" \1 """ , lowerCamelCase__ ) UpperCamelCase__ :Dict = re.sub(r"""\s{2,}""" , """ """ , lowerCamelCase__ ) if "\n" in token: UpperCamelCase__ :Dict = token.replace("""\n""" , """ __newln__""" ) UpperCamelCase__ :Dict = token.split(""" """ ) UpperCamelCase__ :Union[str, Any] = [] for token in tokens: if not len(lowerCamelCase__ ): continue UpperCamelCase__ :Any = token.lower() UpperCamelCase__ :Tuple = tuple(lowerCamelCase__ ) UpperCamelCase__ :List[str] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) UpperCamelCase__ :Optional[Any] = get_pairs(lowerCamelCase__ ) if not pairs: words.append(lowerCamelCase__ ) continue while True: UpperCamelCase__ :Optional[Any] = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : 
self.bpe_ranks.get(lowerCamelCase__ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase__ , UpperCamelCase__ :int = bigram UpperCamelCase__ :List[str] = [] UpperCamelCase__ :List[str] = 0 while i < len(lowerCamelCase__ ): try: UpperCamelCase__ :Optional[int] = word.index(lowerCamelCase__ , lowerCamelCase__ ) new_word.extend(word[i:j] ) UpperCamelCase__ :Dict = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase__ :Dict = tuple(lowerCamelCase__ ) UpperCamelCase__ :Any = new_word if len(lowerCamelCase__ ) == 1: break else: UpperCamelCase__ :str = get_pairs(lowerCamelCase__ ) UpperCamelCase__ :Optional[int] = """@@ """.join(lowerCamelCase__ ) UpperCamelCase__ :List[str] = word[:-4] UpperCamelCase__ :Optional[int] = word words.append(lowerCamelCase__ ) return " ".join(lowerCamelCase__ ) def __a ( self :Dict , lowerCamelCase__ :str ): UpperCamelCase__ :Dict = [] UpperCamelCase__ :List[Any] = re.findall(r"""\S+\n?""" , lowerCamelCase__ ) for token in words: split_tokens.extend(list(self.bpe(lowerCamelCase__ ).split(""" """ ) ) ) return split_tokens def __a ( self :Union[str, Any] , lowerCamelCase__ :str ): UpperCamelCase__ :List[Any] = token.lower() return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) ) def __a ( self :Optional[int] , lowerCamelCase__ :int ): return self.decoder.get(lowerCamelCase__ , self.unk_token ) def __a ( self :Optional[Any] , lowerCamelCase__ :List[str] ): UpperCamelCase__ :int = """ """.join(lowerCamelCase__ ).replace("""@@ """ , """""" ).strip() return out_string def __a ( self :Optional[int] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[str] = None ): if not os.path.isdir(lowerCamelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase__ :Dict = os.path.join( lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ :List[Any] = os.path.join( lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + """\n""" ) UpperCamelCase__ :Optional[Any] = 0 with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase__ :str = token_index writer.write(""" """.join(lowerCamelCase__ ) + """\n""" ) index += 1 return vocab_file, merge_file
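# For orientation, the pair-extraction step in the tokenizer above turns a
# symbol sequence into the set of adjacent pairs that are candidates for the
# next BPE merge. A self-contained sketch of the same idea (the function name
# and sample word are mine, not the tokenizer's):
def get_symbol_pairs(word: tuple) -> set:
    """Return the set of adjacent symbol pairs in `word`."""
    return set(zip(word, word[1:]))


print(get_symbol_pairs(("l", "o", "w", "</w>")))
# e.g. {('l', 'o'), ('o', 'w'), ('w', '</w>')}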
45
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
694
0
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = "" for i in table: res += inp[i - 1] return res def lowerCamelCase_( _lowerCamelCase ) -> Dict: '''simple docstring''' return data[1:] + data[0] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Optional[Any] = "" for i in range(len(_lowerCamelCase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : int = int("0b" + data[0] + data[-1] , 2 ) _lowerCamelCase : int = int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : List[Any] = message[:4] _lowerCamelCase : str = message[4:] _lowerCamelCase : Any = apply_table(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[int] = xor(_lowerCamelCase , _lowerCamelCase ) _lowerCamelCase : Optional[Any] = apply_sbox(_lowerCamelCase , temp[:4] ) # noqa: E741 _lowerCamelCase : Any = apply_sbox(_lowerCamelCase , temp[4:] ) _lowerCamelCase : Optional[Any] = "0" * (2 - len(_lowerCamelCase )) + l # noqa: E741 _lowerCamelCase : str = "0" * (2 - len(_lowerCamelCase )) + r _lowerCamelCase : str = apply_table(l + r , _lowerCamelCase ) _lowerCamelCase : Optional[Any] = xor(_lowerCamelCase , _lowerCamelCase ) return temp + right if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = input('''Enter 10 bit key: ''') _lowerCAmelCase : Tuple = input('''Enter 8 bit message: ''') _lowerCAmelCase : Tuple = [6, 3, 7, 4, 8, 5, 10, 9] _lowerCAmelCase : Optional[int] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] _lowerCAmelCase : Tuple = [2, 4, 3, 1] _lowerCAmelCase : Tuple = [2, 6, 3, 1, 4, 8, 5, 7] _lowerCAmelCase : int = [4, 1, 3, 5, 7, 2, 8, 6] _lowerCAmelCase : int = [4, 1, 2, 3, 2, 3, 4, 1] _lowerCAmelCase : str = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] _lowerCAmelCase : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation _lowerCAmelCase : int = apply_table(key, paa_table) _lowerCAmelCase : int = temp[:5] _lowerCAmelCase : Dict = temp[5:] _lowerCAmelCase : int = left_shift(left) _lowerCAmelCase : List[Any] = left_shift(right) _lowerCAmelCase : List[Any] = apply_table(left + right, pa_table) _lowerCAmelCase : Tuple = left_shift(left) _lowerCAmelCase : Union[str, Any] = left_shift(right) _lowerCAmelCase : Any = left_shift(left) _lowerCAmelCase : List[Any] = left_shift(right) _lowerCAmelCase : Optional[Any] = apply_table(left + right, pa_table) # encryption _lowerCAmelCase : Any = apply_table(message, IP) _lowerCAmelCase : Optional[int] = function(expansion, sa, sa, keya, temp) _lowerCAmelCase : Optional[int] = temp[4:] + temp[:4] _lowerCAmelCase : Tuple = function(expansion, sa, sa, keya, temp) _lowerCAmelCase : str = apply_table(temp, IP_inv) print('''Cipher text is:''', CT) # decryption _lowerCAmelCase : Tuple = apply_table(CT, IP) _lowerCAmelCase : str = function(expansion, sa, sa, keya, temp) _lowerCAmelCase : Any = temp[4:] + temp[:4] _lowerCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp) _lowerCAmelCase : int = apply_table(temp, IP_inv) print('''Plain text after decypting is:''', PT)
46
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
694
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ): __a : List[str] = 3_8_4 if "tiny" in model_name: __a : Optional[int] = [3, 3, 9, 3] __a : Tuple = [9_6, 1_9_2, 3_8_4, 7_6_8] if "small" in model_name: __a : Any = [3, 3, 2_7, 3] __a : List[Any] = [9_6, 1_9_2, 3_8_4, 7_6_8] if "base" in model_name: __a : str = [3, 3, 2_7, 3] __a : Dict = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4] __a : Optional[int] = 5_1_2 if "large" in model_name: __a : Tuple = [3, 3, 2_7, 3] __a : str = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6] __a : Dict = 7_6_8 if "xlarge" in model_name: __a : int = [3, 3, 2_7, 3] __a : Optional[Any] = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] __a : Optional[int] = 1_0_2_4 # set label information __a : Tuple = 1_5_0 __a : Optional[int] = 'huggingface/label-files' __a : Any = 'ade20k-id2label.json' __a : Dict = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) ) __a : Union[str, Any] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} __a : Any = {v: k for k, v in idalabel.items()} __a : Optional[int] = ConvNextConfig( depths=lowerCamelCase_ , hidden_sizes=lowerCamelCase_ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) __a : Optional[int] = UperNetConfig( backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , ) return config def UpperCAmelCase__ ( lowerCamelCase_ : str ): __a : int = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') ) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') ) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') ) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', 
f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): __a : List[str] = dct.pop(lowerCamelCase_ ) __a : Optional[Any] = val def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple ): __a : str = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } __a : Any = model_name_to_url[model_name] __a : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu' )['state_dict'] __a : Optional[int] = get_upernet_config(lowerCamelCase_ ) __a : Any = UperNetForSemanticSegmentation(lowerCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __a : Dict = state_dict.pop(lowerCamelCase_ ) if "bn" in key: __a : Dict = key.replace('bn' , 'batch_norm' ) __a : int = val # rename keys __a : Dict = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ ) # verify on image __a : str = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' __a : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('RGB' ) __a : Union[str, Any] = SegformerImageProcessor() __a : str = processor(lowerCamelCase_ , return_tensors='pt' ).pixel_values with torch.no_grad(): __a : Union[str, Any] = model(lowerCamelCase_ ) if model_name == "upernet-convnext-tiny": __a : 
Optional[Any] = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": __a : Tuple = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": __a : Optional[int] = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": __a : str = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": __a : int = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCamelCase_ ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F"upernet-convnext-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
47
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : int = logging.get_logger(__name__) UpperCAmelCase__ : Tuple = { "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Optional[int] = 'wavlm' def __init__( self : Dict , __magic_name__ : Dict=32 , __magic_name__ : Tuple=768 , __magic_name__ : Tuple=12 , __magic_name__ : int=12 , __magic_name__ : int=3072 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Any=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Dict=0.0 , __magic_name__ : List[Any]=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Tuple=0.02 , __magic_name__ : List[Any]=1E-5 , __magic_name__ : Union[str, Any]="group" , __magic_name__ : Tuple="gelu" , __magic_name__ : List[str]=(512, 512, 512, 512, 512, 512, 512) , __magic_name__ : Any=(5, 2, 2, 2, 2, 2, 2) , __magic_name__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __magic_name__ : List[str]=False , __magic_name__ : List[str]=128 , __magic_name__ : int=16 , __magic_name__ : Dict=320 , __magic_name__ : Optional[Any]=800 , __magic_name__ : str=False , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[int]=0.05 , __magic_name__ : Tuple=10 , __magic_name__ : Any=2 , __magic_name__ : List[str]=0.0 , __magic_name__ : Dict=10 , __magic_name__ : Optional[Any]=320 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Dict=0.1 , __magic_name__ : Tuple=100 , __magic_name__ : int=256 , __magic_name__ : Union[str, Any]=256 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Optional[Any]="mean" , __magic_name__ : Dict=False , __magic_name__ : List[str]=False , __magic_name__ : List[str]=256 , __magic_name__ : Optional[Any]=(512, 512, 512, 512, 1500) , __magic_name__ : Dict=(5, 3, 3, 1, 1) , __magic_name__ : Optional[Any]=(1, 2, 3, 1, 1) , __magic_name__ : int=512 , __magic_name__ : Any=80 , __magic_name__ : List[str]=0 , __magic_name__ : Tuple=1 , __magic_name__ : Optional[int]=2 , __magic_name__ : int=False , __magic_name__ : List[str]=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[str]=3 , __magic_name__ : Optional[Any]=None , **__magic_name__ : Optional[Any] , ): """simple docstring""" super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ ) lowerCAmelCase__ = hidden_size lowerCAmelCase__ = feat_extract_norm lowerCAmelCase__ = feat_extract_activation lowerCAmelCase__ = list(__magic_name__ ) lowerCAmelCase__ = list(__magic_name__ ) lowerCAmelCase__ = list(__magic_name__ ) lowerCAmelCase__ = conv_bias lowerCAmelCase__ = num_buckets lowerCAmelCase__ = max_bucket_distance lowerCAmelCase__ = num_conv_pos_embeddings lowerCAmelCase__ = num_conv_pos_embedding_groups lowerCAmelCase__ = len(self.conv_dim ) lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = hidden_dropout lowerCAmelCase__ = attention_dropout lowerCAmelCase__ = activation_dropout lowerCAmelCase__ = feat_proj_dropout lowerCAmelCase__ = final_dropout lowerCAmelCase__ = layerdrop lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = initializer_range lowerCAmelCase__ = num_ctc_classes lowerCAmelCase__ = vocab_size lowerCAmelCase__ = do_stable_layer_norm 
lowerCAmelCase__ = use_weighted_layer_sum lowerCAmelCase__ = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ = apply_spec_augment lowerCAmelCase__ = mask_time_prob lowerCAmelCase__ = mask_time_length lowerCAmelCase__ = mask_time_min_masks lowerCAmelCase__ = mask_feature_prob lowerCAmelCase__ = mask_feature_length # parameters for pretraining with codevector quantized representations lowerCAmelCase__ = num_codevectors_per_group lowerCAmelCase__ = num_codevector_groups lowerCAmelCase__ = contrastive_logits_temperature lowerCAmelCase__ = num_negatives lowerCAmelCase__ = codevector_dim lowerCAmelCase__ = proj_codevector_dim lowerCAmelCase__ = diversity_loss_weight # ctc loss lowerCAmelCase__ = ctc_loss_reduction lowerCAmelCase__ = ctc_zero_infinity # adapter lowerCAmelCase__ = add_adapter lowerCAmelCase__ = adapter_kernel_size lowerCAmelCase__ = adapter_stride lowerCAmelCase__ = num_adapter_layers lowerCAmelCase__ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase__ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase__ = list(__magic_name__ ) lowerCAmelCase__ = list(__magic_name__ ) lowerCAmelCase__ = list(__magic_name__ ) lowerCAmelCase__ = xvector_output_dim @property def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
48
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
694
0
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : Tuple , _lowercase : Distribution , _lowercase : List[str]=None , _lowercase : str=None , _lowercase : Tuple=0 ): __UpperCAmelCase = 1.0 if scale is None else scale __UpperCAmelCase = 0.0 if loc is None else loc super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] ) @property def a ( self : Tuple ): return self.base_dist.mean * self.scale + self.loc @property def a ( self : int ): return self.base_dist.variance * self.scale**2 @property def a ( self : Optional[int] ): return self.variance.sqrt() class _UpperCAmelCase ( nn.Module ): def __init__( self : List[str] , _lowercase : int , _lowercase : Dict[str, int] , _lowercase : Callable[..., Tuple[torch.Tensor]] , **_lowercase : Tuple ): super().__init__(**_lowercase ) __UpperCAmelCase = args_dim __UpperCAmelCase = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] ) __UpperCAmelCase = domain_map def a ( self : int , _lowercase : torch.Tensor ): __UpperCAmelCase = [proj(_lowercase ) for proj in self.proj] return self.domain_map(*_lowercase ) class _UpperCAmelCase ( nn.Module ): def __init__( self : Union[str, Any] , _lowercase : List[str] ): super().__init__() __UpperCAmelCase = function def a ( self : Union[str, Any] , _lowercase : Tuple , *_lowercase : Optional[Any] ): return self.function(_lowercase , *_lowercase ) class _UpperCAmelCase : a__ : type a__ : int a__ : Dict[str, int] def __init__( self : List[str] , _lowercase : int = 1 ): __UpperCAmelCase = dim __UpperCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim} def a ( self : List[str] , _lowercase : Optional[Any] ): if self.dim == 1: return self.distribution_class(*_lowercase ) else: return Independent(self.distribution_class(*_lowercase ) , 1 ) def a ( self : Optional[int] , _lowercase : str , _lowercase : Optional[torch.Tensor] = None , _lowercase : Optional[torch.Tensor] = None , ): __UpperCAmelCase = self._base_distribution(_lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim ) @property def a ( self : List[str] ): return () if self.dim == 1 else (self.dim,) @property def a ( self : List[str] ): return len(self.event_shape ) @property def a ( self : Dict ): return 0.0 def a ( self : Dict , _lowercase : int ): return ParameterProjection( in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def a ( self : List[str] , *_lowercase : torch.Tensor ): raise NotImplementedError() @staticmethod def a ( _lowercase : torch.Tensor ): return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0 class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} a__ : type = StudentT @classmethod def a ( cls : Optional[Any] , _lowercase : torch.Tensor , _lowercase : torch.Tensor , _lowercase : torch.Tensor ): __UpperCAmelCase = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __UpperCAmelCase = 2.0 + cls.squareplus(_lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict[str, int] = {"loc": 1, "scale": 1} a__ : type = 
Normal @classmethod def a ( cls : Optional[int] , _lowercase : torch.Tensor , _lowercase : torch.Tensor ): __UpperCAmelCase = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict[str, int] = {"total_count": 1, "logits": 1} a__ : type = NegativeBinomial @classmethod def a ( cls : Tuple , _lowercase : torch.Tensor , _lowercase : torch.Tensor ): __UpperCAmelCase = cls.squareplus(_lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def a ( self : Optional[Any] , _lowercase : Any ): __UpperCAmelCase , __UpperCAmelCase = distr_args if self.dim == 1: return self.distribution_class(total_count=_lowercase , logits=_lowercase ) else: return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 ) def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Optional[torch.Tensor] = None , _lowercase : Optional[torch.Tensor] = None ): __UpperCAmelCase , __UpperCAmelCase = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
49
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
0
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class UpperCamelCase__ (a ): '''simple docstring''' def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ): super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ): lowerCamelCase__ = {} lowerCamelCase__ = {} if prompt is not None: lowerCamelCase__ = prompt if generate_kwargs is not None: lowerCamelCase__ = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowerCamelCase__ = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) lowerCamelCase__ = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self ,_lowerCAmelCase ,**_lowerCAmelCase ): return super().__call__(_lowerCAmelCase ,**_lowerCAmelCase ) def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): lowerCamelCase__ = load_image(_lowerCAmelCase ) if prompt is not None: if not isinstance(_lowerCAmelCase ,_lowerCAmelCase ): raise ValueError( F'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. ''' """Note also that one single text can be provided for conditional image to text generation.""" ) lowerCamelCase__ = self.model.config.model_type if model_type == "git": lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework ) lowerCamelCase__ = self.tokenizer(text=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ).input_ids lowerCamelCase__ = [self.tokenizer.cls_token_id] + input_ids lowerCamelCase__ = torch.tensor(_lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,header_text=_lowerCAmelCase ,return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework ) lowerCamelCase__ = self.tokenizer(_lowerCAmelCase ,return_tensors=self.framework ) model_inputs.update(_lowerCAmelCase ) else: raise ValueError(F'''Model type {model_type} does not support conditional text generation''' ) else: lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowerCamelCase__ = None return model_inputs def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ): # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). 
In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] ,_lowerCAmelCase ) and all(x is None for x in model_inputs["""input_ids"""] ) ): lowerCamelCase__ = None if generate_kwargs is None: lowerCamelCase__ = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowerCamelCase__ = model_inputs.pop(self.model.main_input_name ) lowerCamelCase__ = self.model.generate(_lowerCAmelCase ,**_lowerCAmelCase ,**_lowerCAmelCase ) return model_outputs def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = [] for output_ids in model_outputs: lowerCamelCase__ = { """generated_text""": self.tokenizer.decode( _lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ,) } records.append(_lowerCAmelCase ) return records
50
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
694
0
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        # exception type inferred from find_executable_batch_size, which raises a
        # RuntimeError once it reaches a batch size of zero
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        # exception type inferred: passing batch_size explicitly trips a TypeError guard
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
51
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
694
0
"""simple docstring""" import itertools import string from collections.abc import Generator, Iterable def __A ( a_ :Iterable[str] , a_ :int) -> Generator[tuple[str, ...], None, None]: __a : List[str] = iter(a_) while True: __a : List[Any] = tuple(itertools.islice(a_ , a_)) if not chunk: return yield chunk def __A ( a_ :str) -> str: __a : int = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters]) __a : Tuple = '''''' if len(a_) < 2: return dirty for i in range(len(a_) - 1): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(a_) & 1: clean += "X" return clean def __A ( a_ :str) -> list[str]: # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) __a : Optional[Any] = '''ABCDEFGHIKLMNOPQRSTUVWXYZ''' # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler __a : Tuple = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(a_) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(a_) return table def __A ( a_ :str , a_ :str) -> str: __a : Optional[Any] = generate_table(a_) __a : Optional[int] = prepare_input(a_) __a : List[str] = '''''' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(a_ , 2): __a , __a : Optional[Any] = divmod(table.index(a_) , 5) __a , __a : Tuple = divmod(table.index(a_) , 5) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def __A ( a_ :str , a_ :str) -> str: __a : Any = generate_table(a_) __a : Any = '''''' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(a_ , 2): __a , __a : Any = divmod(table.index(a_) , 5) __a , __a : Union[str, Any] = divmod(table.index(a_) , 5) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
52
'''simple docstring''' from __future__ import annotations class A__ : def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' _a : int =order # a_{0} ... a_{k} _a : Optional[Any] =[1.0] + [0.0] * order # b_{0} ... b_{k} _a : Tuple =[1.0] + [0.0] * order # x[n-1] ... x[n-k] _a : List[Any] =[0.0] * self.order # y[n-1] ... y[n-k] _a : Tuple =[0.0] * self.order def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None: '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) < self.order: _a : int =[1.0, *a_coeffs] if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : int =( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : Optional[Any] =( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) _a : List[str] =a_coeffs _a : Union[str, Any] =b_coeffs def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float: '''simple docstring''' _a : str =0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _a : str =self.input_history[:-1] _a : Optional[Any] =self.output_history[:-1] _a : Optional[int] =sample _a : Tuple =result return result
694
0
def get_1s_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s, hence the
        # loop won't run 32 times; it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : dict ,_UpperCAmelCase : str ,_UpperCAmelCase : set ,_UpperCAmelCase : set ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ,_UpperCAmelCase : PriorityQueue ,_UpperCAmelCase : dict ,_UpperCAmelCase : float | int ,) -> float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue _a : Dict =cst_fwd.get(_UpperCAmelCase ,np.inf ) _a : int =cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) _a : Tuple =new_cost_f _a : Optional[Any] =v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: _a : str =cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : dict ,_UpperCAmelCase : dict ) -> int: _a : Optional[Any] =-1 _a : List[str] =set() _a : Optional[int] =set() _a : Optional[int] ={source: 0} _a : List[str] ={destination: 0} _a : Union[str, Any] ={source: None} _a : Dict ={destination: None} _a : PriorityQueue[Any] =PriorityQueue() _a : PriorityQueue[Any] =PriorityQueue() _a : Optional[int] =np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): _a , _a : str =queue_forward.get() visited_forward.add(_UpperCAmelCase ) _a , _a : List[Any] =queue_backward.get() visited_backward.add(_UpperCAmelCase ) _a : int =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) _a : Any =pass_and_relaxation( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: _a : Any =shortest_distance return shortest_path_distance A__: Union[str, Any] = { '''B''': [['''C''', 1]], '''C''': [['''D''', 1]], '''D''': [['''F''', 1]], '''E''': [['''B''', 1], ['''G''', 2]], '''F''': [], '''G''': [['''F''', 1]], } A__: str = { '''B''': [['''E''', 1]], '''C''': [['''B''', 1]], '''D''': [['''C''', 1]], '''F''': [['''D''', 1], ['''G''', 1]], '''E''': [[None, np.inf]], '''G''': [['''E''', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
694
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class A ( __lowercase ): _snake_case =42 _snake_case =42 _snake_case =None class A ( __lowercase , __lowercase ): _snake_case =2 @register_to_config def __init__( self: List[str] , _lowerCAmelCase: float = 0.02 , _lowerCAmelCase: float = 100 , _lowerCAmelCase: float = 1.0_07 , _lowerCAmelCase: float = 80 , _lowerCAmelCase: float = 0.05 , _lowerCAmelCase: float = 50 , ) -> Any: '''simple docstring''' UpperCAmelCase_ =sigma_max # setable values UpperCAmelCase_ =None UpperCAmelCase_ =None UpperCAmelCase_ =None # sigma(t_i) def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: Optional[int] = None ) -> torch.FloatTensor: '''simple docstring''' return sample def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Union[str, torch.device] = None ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ =num_inference_steps UpperCAmelCase_ =np.arange(0 , self.num_inference_steps )[::-1].copy() UpperCAmelCase_ =torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase ) UpperCAmelCase_ =[ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] UpperCAmelCase_ =torch.tensor(_lowerCAmelCase , dtype=torch.floataa , device=_lowerCAmelCase ) def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: float , _lowerCAmelCase: Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase_ =min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: UpperCAmelCase_ =0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase_ =self.config.s_noise * randn_tensor(sample.shape , generator=_lowerCAmelCase ).to(sample.device ) UpperCAmelCase_ =sigma + gamma * sigma UpperCAmelCase_ =sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: float , _lowerCAmelCase: float , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple docstring''' UpperCAmelCase_ =sample_hat + sigma_hat * model_output UpperCAmelCase_ =(sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase_ =sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: float , _lowerCAmelCase: float , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple docstring''' UpperCAmelCase_ =sample_prev + sigma_prev * model_output UpperCAmelCase_ =(sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase_ =sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_lowerCAmelCase , derivative=_lowerCAmelCase , 
pred_original_sample=_lowerCAmelCase ) def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Dict , _lowerCAmelCase: List[str] , _lowerCAmelCase: Any ) -> List[Any]: '''simple docstring''' raise NotImplementedError()
54
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in num!"""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
694
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :Union[str, Any] = '▁' SCREAMING_SNAKE_CASE :Optional[int] = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Dict = { 'vocab_file': { 'google/reformer-crime-and-punishment': ( 'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model' ) } } SCREAMING_SNAKE_CASE :Optional[int] = { 'google/reformer-crime-and-punishment': 52_4288, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] def __init__( self : Tuple ,A : Any ,A : Any="</s>" ,A : Tuple="<unk>" ,A : Any=[] ,A : Optional[Dict[str, Any]] = None ,**A : List[Any] ,): __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=A ,unk_token=A ,additional_special_tokens=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) @property def UpperCamelCase_ ( self : Optional[int] ): return self.sp_model.get_piece_size() def UpperCamelCase_ ( self : List[str] ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : str ,A : Any ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : int ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : str ,A : Tuple ): return self.sp_model.piece_to_id(A ) def UpperCamelCase_ ( self : str ,A : Optional[Any] ): if index < self.sp_model.get_piece_size(): __A = self.sp_model.IdToPiece(A ) return token def UpperCamelCase_ ( self : List[Any] ,A : List[str] ): __A = [] __A = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A ) + token __A = [] else: current_sub_tokens.append(A ) out_string += self.sp_model.decode(A ) return out_string.strip() def UpperCamelCase_ ( self : List[Any] ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,)
55
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
694
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _a : Tuple = logging.get_logger(__name__) class _lowercase ( __lowercase ): _SCREAMING_SNAKE_CASE : Optional[Any] = ["pixel_values"] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE_ ) __snake_case = size if size is not None else {'height': 224, 'width': 224} __snake_case = get_size_dict(SCREAMING_SNAKE_CASE_ ) __snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224} __snake_case = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ , param_name='crop_size' ) __snake_case = do_resize __snake_case = do_rescale __snake_case = do_normalize __snake_case = do_center_crop __snake_case = crop_size __snake_case = size __snake_case = resample __snake_case = rescale_factor __snake_case = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __snake_case = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any , ) -> np.ndarray: __snake_case = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "shortest_edge" in size: __snake_case = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: __snake_case = (size['height'], size['width']) else: raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> np.ndarray: __snake_case = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> np.ndarray: return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : str , ) -> BatchFeature: __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case = crop_size if crop_size is not None else self.crop_size __snake_case = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE_ ) __snake_case = resample if resample is not None else self.resample __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = size if size is not None else self.size __snake_case = get_size_dict(SCREAMING_SNAKE_CASE_ ) if not is_batched(SCREAMING_SNAKE_CASE_ ): __snake_case = [images] if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. 
__snake_case = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: __snake_case = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: __snake_case = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: __snake_case = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: __snake_case = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] __snake_case = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] __snake_case = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
56
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
0
import tensorflow as tf from ...tf_utils import shape_list class _lowerCAmelCase( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False , **_lowerCamelCase ): super().__init__(**_lowerCamelCase ) UpperCamelCase_: Dict = vocab_size UpperCamelCase_: Union[str, Any] = d_embed UpperCamelCase_: List[Any] = d_proj UpperCamelCase_: Tuple = cutoffs + [vocab_size] UpperCamelCase_: Any = [0] + self.cutoffs UpperCamelCase_: List[str] = div_val UpperCamelCase_: Optional[int] = self.cutoffs[0] UpperCamelCase_: Tuple = len(self.cutoffs ) - 1 UpperCamelCase_: Optional[Any] = self.shortlist_size + self.n_clusters UpperCamelCase_: int = keep_order UpperCamelCase_: Optional[Any] = [] UpperCamelCase_: int = [] def _a ( self , _lowerCamelCase ): if self.n_clusters > 0: UpperCamelCase_: str = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=_lowerCamelCase , name='cluster_weight' ) UpperCamelCase_: Any = self.add_weight( shape=(self.n_clusters,) , initializer='zeros' , trainable=_lowerCamelCase , name='cluster_bias' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: UpperCamelCase_: List[Any] = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_projs_._{i}''' , ) self.out_projs.append(_lowerCamelCase ) else: self.out_projs.append(_lowerCamelCase ) UpperCamelCase_: List[Any] = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , ) UpperCamelCase_: Optional[Any] = self.add_weight( shape=(self.vocab_size,) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCamelCase_: Dict = self.d_embed // (self.div_val**i) UpperCamelCase_: Any = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_projs_._{i}''' ) self.out_projs.append(_lowerCamelCase ) UpperCamelCase_: Union[str, Any] = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , ) UpperCamelCase_: int = self.add_weight( shape=(r_idx - l_idx,) , initializer='zeros' , trainable=_lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias) ) super().build(_lowerCamelCase ) @staticmethod def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ): UpperCamelCase_: List[str] = x if proj is not None: UpperCamelCase_: Tuple = tf.einsum('ibd,ed->ibe' , _lowerCamelCase , _lowerCamelCase ) return tf.einsum('ibd,nd->ibn' , _lowerCamelCase , _lowerCamelCase ) + b @staticmethod def _a ( _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: Tuple = shape_list(_lowerCamelCase ) UpperCamelCase_: Optional[Any] = tf.range(lp_size[0] , dtype=target.dtype ) UpperCamelCase_: Any = tf.stack([r, target] , 1 ) return tf.gather_nd(_lowerCamelCase , _lowerCamelCase ) def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=False ): UpperCamelCase_: int = 0 if self.n_clusters == 0: UpperCamelCase_: Union[str, Any] = self._logit(_lowerCamelCase , self.out_layers[0][0] , 
self.out_layers[0][1] , self.out_projs[0] ) if target is not None: UpperCamelCase_: List[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_lowerCamelCase , logits=_lowerCamelCase ) UpperCamelCase_: Optional[Any] = tf.nn.log_softmax(_lowerCamelCase , axis=-1 ) else: UpperCamelCase_: Optional[Any] = shape_list(_lowerCamelCase ) UpperCamelCase_: Union[str, Any] = [] UpperCamelCase_: Optional[Any] = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: UpperCamelCase_: List[Any] = (target >= l_idx) & (target < r_idx) UpperCamelCase_: List[Any] = tf.where(_lowerCamelCase ) UpperCamelCase_: str = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase ) - l_idx if self.div_val == 1: UpperCamelCase_: Optional[Any] = self.out_layers[0][0][l_idx:r_idx] UpperCamelCase_: List[str] = self.out_layers[0][1][l_idx:r_idx] else: UpperCamelCase_: Optional[int] = self.out_layers[i][0] UpperCamelCase_: int = self.out_layers[i][1] if i == 0: UpperCamelCase_: str = tf.concat([cur_W, self.cluster_weight] , 0 ) UpperCamelCase_: Any = tf.concat([cur_b, self.cluster_bias] , 0 ) UpperCamelCase_: List[Any] = self._logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.out_projs[0] ) UpperCamelCase_: int = tf.nn.log_softmax(_lowerCamelCase ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: UpperCamelCase_: Any = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: List[Any] = self._gather_logprob(_lowerCamelCase , _lowerCamelCase ) else: UpperCamelCase_: str = self._logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.out_projs[i] ) UpperCamelCase_: Optional[Any] = tf.nn.log_softmax(_lowerCamelCase ) UpperCamelCase_: List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster UpperCamelCase_: Dict = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(_lowerCamelCase ) if target is not None: UpperCamelCase_: List[Any] = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: Optional[Any] = tf.boolean_mask(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_: int = self._gather_logprob(_lowerCamelCase , _lowerCamelCase ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(_lowerCamelCase , -cur_logprob , shape_list(_lowerCamelCase ) ) UpperCamelCase_: Optional[Any] = tf.concat(_lowerCamelCase , axis=-1 ) if target is not None: if return_mean: UpperCamelCase_: Any = tf.reduce_mean(_lowerCamelCase ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(_lowerCamelCase ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(_lowerCamelCase , name=self.name , aggregation='mean' if return_mean else '' ) return out
57
'''simple docstring'''

def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
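# Quick sanity checks for is_sum_subset (an illustrative sketch): 4 + 5 == 9,
# while no subset of the list sums to 30 (34 alone overshoots, the rest total 26).
assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False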
694
0
"""simple docstring""" __lowerCAmelCase : Tuple = ''' # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git ''' __lowerCAmelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] __lowerCAmelCase : Any = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
58
'''simple docstring'''

def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
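# Worked check: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34,
# so solution(100) == 44; not exceeding 10 they are 2 and 8, so solution(10) == 10.
assert solution(100) == 44
assert solution(10) == 10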
694
0
def solution() -> int:
    """simple docstring"""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
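# Independent cross-check using the standard library (not part of the solution
# above): count the first-of-month Sundays in 1901-2000 directly.
import calendar

sundays_direct = sum(
    calendar.weekday(year, month, 1) == calendar.SUNDAY
    for year in range(1901, 2001)
    for month in range(1, 13)
)
assert sundays_direct == 171  # solution() should return the same value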
59
'''simple docstring'''

def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
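# Worked example: fibonacci(12) == 144 is the first Fibonacci number with three
# digits, so the index returned for n == 3 is 12.
assert fibonacci_digits_index(3) == 12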
694
0
import os import sys import unittest lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowerCAmelCase_ = os.path.join(git_repo_path, '''src''', '''transformers''') lowerCAmelCase_ = ''' {0} = None ''' lowerCAmelCase_ = ''' class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) ''' lowerCAmelCase_ = ''' def {0}(*args, **kwargs): requires_backends({0}, {1}) ''' class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : List[str] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' ) self.assertIsNone(__magic_name__ ) snake_case_ : Optional[Any] = find_backend(''' if not is_tokenizers_available():''' ) self.assertEqual(__magic_name__ , '''tokenizers''' ) snake_case_ : Dict = find_backend(''' if not is_tensorflow_text_available():''' ) self.assertEqual(__magic_name__ , '''tensorflow_text''' ) snake_case_ : str = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' ) self.assertEqual(__magic_name__ , '''sentencepiece_and_tokenizers''' ) snake_case_ : Tuple = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' ) self.assertEqual(__magic_name__ , '''sentencepiece_and_tensorflow_text''' ) snake_case_ : Tuple = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' ) self.assertEqual(__magic_name__ , '''sentencepiece_and_tokenizers_and_vision''' ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : int = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __magic_name__ ) self.assertIn('''tensorflow_text''' , __magic_name__ ) self.assertIn('''sentencepiece_and_tokenizers''' , __magic_name__ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertModel''' , objects['''tf'''] ) self.assertIn('''FlaxBertModel''' , objects['''flax'''] ) self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] ) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(__magic_name__ , '''\nCONSTANT = None\n''' ) snake_case_ : Tuple = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( __magic_name__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) snake_case_ : Union[str, Any] = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' snake_case_ : Tuple = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : int = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' snake_case_ : List[str] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , __magic_name__ )
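# Behavior sketch of find_backend, consistent with the assertions above: it
# returns None for ordinary lines, the backend name for a single guard such as
# `if not is_tokenizers_available():`, and joins compound guards with "_and_"
# (e.g. "sentencepiece_and_tokenizers").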
60
'''simple docstring'''
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--rembert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained RemBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
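# Hypothetical invocation (all paths are placeholders, not files referenced by
# the script above):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin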
694
0
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
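# Usage sketch: bisection needs f(a) and f(b) to bracket a root (opposite signs);
# here f(1) == -6 and f(1000) > 0, and the real root is near 2.0946.
root = bisection(f, 1, 1000)
assert abs(f(root)) < 1e-5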
61
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
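# Usage sketch (kept as comments; assumes network access to the checkpoint named
# in the vocabulary maps above, and the sample sentence is only illustrative):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
# print(tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"])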
694
0
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
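# Usage sketch (assumes JAX and Flax are installed; relies on the imports above):
# embed four timesteps into 32-dimensional sinusoidal features, then project them.
import jax

example_timesteps = jnp.arange(4, dtype=jnp.float32)
features = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
assert features.shape == (4, 32)

embed = FlaxTimestepEmbedding(time_embed_dim=128)
params = embed.init(jax.random.PRNGKey(0), features)
assert embed.apply(params, features).shape == (4, 128)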
62
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
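# Usage sketch for two of the utilities re-exported above (assumes the
# `accelerate` package is installed):
from accelerate.utils import find_executable_batch_size, patch_environment


@find_executable_batch_size(starting_batch_size=128)
def _train(batch_size):
    # retried automatically with a smaller batch size on CUDA out-of-memory errors
    print(f"training with batch size {batch_size}")


with patch_environment(omp_num_threads=1):
    _train()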
694
0
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
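# Programmatic sketch that skips the interactive build_tree() prompt: build a
# three-node tree by hand and run two of the traversals defined above.
demo_root = TreeNode(1)
demo_root.left, demo_root.right = TreeNode(2), TreeNode(3)
pre_order(demo_root)  # prints 1,2,3,
print()
in_order(demo_root)  # prints 2,1,3,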
63
'''simple docstring''' def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]: _a : Dict =current_set.copy() for row_index, row in enumerate(_UpperCAmelCase ): _a : Any =row[0] for column_index, column in enumerate(_UpperCAmelCase ): if magnitude == 0: _a : Any =column continue _a : Union[str, Any] =column / magnitude # Subtract to cancel term _a : Optional[Any] =current_set[0] _a : List[Any] =[first_row] _a : Tuple =current_set[1::] for row in current_set: _a : Any =[] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(_UpperCAmelCase ) continue for column_index in range(len(_UpperCAmelCase ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(_UpperCAmelCase ) # Create next recursion iteration set if len(final_set[0] ) != 3: _a : List[str] =final_set[0] _a : Tuple =[] _a : Tuple =[] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _a : str =simplify(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): resultant[i].insert(0 ,current_first_column[i] ) resultant.insert(0 ,_UpperCAmelCase ) _a : List[Any] =resultant return final_set def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list: if len(_UpperCAmelCase ) == 0: raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) _a : str =len(_UpperCAmelCase ) + 1 if any(len(_UpperCAmelCase ) != _length for item in equations ): raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) for row in equations: if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ): raise ValueError("""solve_simultaneous() requires lists of integers""" ) if len(_UpperCAmelCase ) == 1: return [equations[0][-1] / equations[0][0]] _a : str =equations.copy() if any(0 in row for row in data_set ): _a : Optional[int] =data_set.copy() _a : str =[] for row_index, row in enumerate(_UpperCAmelCase ): if 0 not in row: _a : List[Any] =data_set.pop(_UpperCAmelCase ) break if not full_row: raise ValueError("""solve_simultaneous() requires at least 1 full equation""" ) data_set.insert(0 ,_UpperCAmelCase ) _a : Dict =data_set.copy() _a : Any =simplify(_UpperCAmelCase ) _a : Any =simplified[::-1] _a : list =[] for row in simplified: _a : Optional[Any] =row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(_UpperCAmelCase ) == 0: solutions.append(0 ) continue _a : List[str] =temp_row[1::] _a : int =temp_row[::-1] for column_index, column in enumerate(_UpperCAmelCase ): current_solution -= column * solutions[column_index] solutions.append(_UpperCAmelCase ) _a : Tuple =[] for item in solutions: final.append(float(round(_UpperCAmelCase ,5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() A__: int = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
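# Independent cross-check of the 5-variable system in the __main__ block above
# (a sketch using NumPy, which the file itself does not import): the solution
# is (-1, 0, 1, 2, 3).
import numpy as np

coefficients = np.array(
    [[2, 1, 1, 1, 1], [1, 2, 1, 1, 1], [1, 1, 2, 1, 1], [1, 1, 1, 2, 1], [1, 1, 1, 1, 2]]
)
constants = np.array([4, 5, 6, 7, 8])
assert np.allclose(np.linalg.solve(coefficients, constants), [-1, 0, 1, 2, 3])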
694
0
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py']
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(f'Command: {cmd}')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f'Found {torch.cuda.device_count()} devices, using 2 devices only')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ''

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
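# What the __main__ block above verifies, in brief: with N processes, process i
# holds a tensor with i + 2 rows, so after pad_across_processes every copy is
# zero-padded up to the largest size, N + 1 rows -- at the end by default, or at
# the front when pad_first=True.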
64
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__: Dict = logging.get_logger(__name__) A__: Optional[int] = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = "markuplm" def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _a : Any =vocab_size _a : List[str] =hidden_size _a : List[str] =num_hidden_layers _a : Tuple =num_attention_heads _a : Union[str, Any] =hidden_act _a : Tuple =intermediate_size _a : Optional[Any] =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : Any =max_position_embeddings _a : List[Any] =type_vocab_size _a : List[Any] =initializer_range _a : List[Any] =layer_norm_eps _a : Optional[int] =position_embedding_type _a : List[Any] =use_cache _a : List[str] =classifier_dropout # additional properties _a : int =max_depth _a : Union[str, Any] =max_xpath_tag_unit_embeddings _a : str =max_xpath_subs_unit_embeddings _a : int =tag_pad_id _a : List[Any] =subs_pad_id _a : str =xpath_unit_hidden_size
694
0
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() __UpperCAmelCase = { 'bart': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'bert': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-base-cased-finetuned-mrpc': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'dpr': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'gpt2': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlnet': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm-roberta': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'transfo-xl': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'openai-gpt': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'roberta': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'layoutlm': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'roberta-large-mnli': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'camembert': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'flaubert': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert-base-distilled-squad': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert-visual-feature-encoder': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'ctrl': 
( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'albert': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 't5': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'electra': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'wav2vec2': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=True ): '''simple docstring''' if model_type not in MODEL_CLASSES: raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: UpperCAmelCase__ : str = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) UpperCAmelCase__ : Tuple = config_class.from_json_file(__UpperCamelCase ) UpperCAmelCase__ : Union[str, Any] = True UpperCAmelCase__ : List[str] = True print(F"Building TensorFlow model from configuration: {config}" ) UpperCAmelCase__ : int = model_class(__UpperCamelCase ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): UpperCAmelCase__ : Union[str, Any] = cached_file( __UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: UpperCAmelCase__ : int = load_pytorch_checkpoint_in_tfa_model(__UpperCamelCase , __UpperCamelCase ) if compare_with_pt_model: UpperCAmelCase__ : List[Any] = tf_model(tf_model.dummy_inputs , training=__UpperCamelCase ) # build the network UpperCAmelCase__ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" ) UpperCAmelCase__ : Any = pt_model_class.from_pretrained( pretrained_model_name_or_path=__UpperCamelCase , config=__UpperCamelCase , state_dict=__UpperCamelCase ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = pt_model(**pt_model.dummy_inputs ) UpperCAmelCase__ : int = pto[0].numpy() UpperCAmelCase__ : int = tfo[0].numpy() UpperCAmelCase__ : Optional[Any] = np.amax(np.abs(np_pt - np_tf ) ) print(F"Max absolute difference between models outputs {diff}" ) assert diff <= 2e-2, F"Error, model absolute difference is >2e-2: {diff}" # Save pytorch-model print(F"Save TensorFlow model to {tf_dump_path}" ) tf_model.save_weights(__UpperCamelCase , save_format="""h5""" ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , ): '''simple docstring''' if args_model_type is None: UpperCAmelCase__ : str = list(MODEL_CLASSES.keys() ) else: UpperCAmelCase__ : int = [args_model_type] for j, model_type in enumerate(__UpperCamelCase , start=1 ): print("""=""" * 100 ) print(F" Converting model type {j}/{len(__UpperCamelCase )}: {model_type}" ) print("""=""" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." 
) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: UpperCAmelCase__ : str = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: UpperCAmelCase__ : List[str] = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__UpperCamelCase , __UpperCamelCase ) , start=1 ): print("""-""" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F" Skipping finetuned checkpoint {model_shortcut_name}" ) continue UpperCAmelCase__ : Dict = model_shortcut_name elif only_convert_finetuned_models: print(F" Skipping not finetuned checkpoint {model_shortcut_name}" ) continue print( F" Converting checkpoint {i}/{len(__UpperCamelCase )}: {model_shortcut_name} - model_type {model_type}" ) print("""-""" * 100 ) if config_shortcut_name in aws_config_map: UpperCAmelCase__ : int = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) else: UpperCAmelCase__ : Union[str, Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: UpperCAmelCase__ : Optional[Any] = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models ) else: UpperCAmelCase__ : Any = model_shortcut_name if os.path.isfile(__UpperCamelCase ): UpperCAmelCase__ : int = """converted_model""" convert_pt_checkpoint_to_tf( model_type=__UpperCamelCase , pytorch_checkpoint_path=__UpperCamelCase , config_file=__UpperCamelCase , tf_dump_path=os.path.join(__UpperCamelCase , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__UpperCamelCase , ) if remove_cached_files: os.remove(__UpperCamelCase ) os.remove(__UpperCamelCase ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.' ) parser.add_argument( '--model_type', default=None, type=str, help=( F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and " 'convert all the models from AWS.' ), ) parser.add_argument( '--pytorch_checkpoint_path', default=None, type=str, help=( 'Path to the PyTorch checkpoint path or shortcut name to download from AWS. ' 'If not given, will download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--config_file', default=None, type=str, help=( 'The config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture. If not given and ' '--pytorch_checkpoint_path is not given or is a shortcut name ' 'use the configuration associated to the shortcut name on the AWS' ), ) parser.add_argument( '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.' 
) parser.add_argument( '--use_cached_models', action='store_true', help='Use cached models if possible instead of updating to latest checkpoint versions.', ) parser.add_argument( '--remove_cached_files', action='store_true', help='Remove pytorch models after conversion (save memory when converting in batches).', ) parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.') __UpperCAmelCase = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
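# Hypothetical invocation (paths are placeholders) converting a single BERT
# checkpoint, using only flags defined by the argparse block above:
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./pytorch_model.bin \
#       --config_file ./config.json \
#       --tf_dump_path ./tf_dump \
#       --compare_with_pt_model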
65
'''simple docstring'''
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
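# Hypothetical invocation (paths and repo name are placeholders):
#   python convert_hifigan.py \
#       --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub username/speecht5_hifigan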
694
0
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets UpperCamelCase = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n" UpperCamelCase = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n" UpperCamelCase = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def __a ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[ 'https://arxiv.org/abs/2102.01454', 'https://github.com/krishnap25/mauve', ] , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="auto" , _lowerCAmelCase=-1 , _lowerCAmelCase=0.9 , _lowerCAmelCase=5 , _lowerCAmelCase=5_0_0 , _lowerCAmelCase="gpt2-large" , _lowerCAmelCase=-1 , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=2_5 , _lowerCAmelCase=5 , _lowerCAmelCase=True , _lowerCAmelCase=2_5 , ): _lowercase : Dict = compute_mauve( p_text=_lowerCAmelCase , q_text=_lowerCAmelCase , p_features=_lowerCAmelCase , q_features=_lowerCAmelCase , p_tokens=_lowerCAmelCase , q_tokens=_lowerCAmelCase , num_buckets=_lowerCAmelCase , pca_max_data=_lowerCAmelCase , kmeans_explained_var=_lowerCAmelCase , kmeans_num_redo=_lowerCAmelCase , kmeans_max_iter=_lowerCAmelCase , featurize_model_name=_lowerCAmelCase , device_id=_lowerCAmelCase , max_text_length=_lowerCAmelCase , divergence_curve_discretization_size=_lowerCAmelCase , mauve_scaling_factor=_lowerCAmelCase , verbose=_lowerCAmelCase , seed=_lowerCAmelCase , ) return out
66
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
694
0
def prefix_function(input_string: str) -> list:
    """
    For each position of the string, compute the length of the longest proper
    prefix that is also a suffix (the KMP "prefix function").
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the largest value in the prefix-function array."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
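A short usage sketch for the prefix function above (not part of the original file); the expected values can be checked by hand.

# Assumes prefix_function / longest_prefix from the snippet above.
# For "aabaa" the prefix function is [0, 1, 0, 1, 2]: at each index, the
# length of the longest proper prefix that is also a suffix.
assert prefix_function("aabaa") == [0, 1, 0, 1, 2]
assert longest_prefix("aabaa") == 2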
67
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
694
0
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } __A = { "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"}, "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"}, } __A = { "ctrl": 2_56, } __A = { "Pregnancy": 16_86_29, "Christianity": 76_75, "Explain": 10_64_23, "Fitness": 6_34_40, "Saving": 6_31_63, "Ask": 2_71_71, "Ass": 9_59_85, "Joke": 16_35_09, "Questions": 4_56_22, "Thoughts": 4_96_05, "Retail": 5_23_42, "Feminism": 16_43_38, "Writing": 1_19_92, "Atheism": 19_22_63, "Netflix": 4_86_16, "Computing": 3_96_39, "Opinion": 4_32_13, "Alone": 4_49_67, "Funny": 5_89_17, "Gaming": 4_03_58, "Human": 40_88, "India": 13_31, "Joker": 7_71_38, "Diet": 3_62_06, "Legal": 1_18_59, "Norman": 49_39, "Tip": 7_26_89, "Weight": 5_23_43, "Movies": 4_62_73, "Running": 2_34_25, "Science": 20_90, "Horror": 3_77_93, "Confession": 6_05_72, "Finance": 1_22_50, "Politics": 1_63_60, "Scary": 19_19_85, "Support": 1_26_54, "Technologies": 3_25_16, "Teenage": 6_61_60, "Event": 3_27_69, "Learned": 6_74_60, "Notion": 18_27_70, "Wikipedia": 3_75_83, "Books": 66_65, "Extract": 7_60_50, "Confessions": 10_27_01, "Conspiracy": 7_59_32, "Links": 6_36_74, "Narcissus": 15_04_25, "Relationship": 5_47_66, "Relationships": 13_47_96, "Reviews": 4_16_71, "News": 42_56, "Translation": 2_68_20, "multilingual": 12_84_06, } def lowercase__ ( A_: List[Any] ) -> Dict: """simple docstring""" __UpperCAmelCase =set() __UpperCAmelCase =word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase =char __UpperCAmelCase =set(A_ ) return pairs class _A ( UpperCamelCase ): """simple docstring""" lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : List[str] = CONTROL_CODES def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any="<unk>" , **__SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: super().__init__(unk_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as vocab_handle: __UpperCAmelCase =json.load(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={v: k for k, v in self.encoder.items()} with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as merges_handle: __UpperCAmelCase =merges_handle.read().split("""\n""" )[1:-1] __UpperCAmelCase =[tuple(merge.split() ) for merge in merges] __UpperCAmelCase =dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) __UpperCAmelCase ={} @property def _a ( self : int ) -> List[Any]: return len(self.encoder ) def _a ( self : List[Any] ) -> Union[str, Any]: return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[str] ) -> Any: if token in self.cache: return self.cache[token] __UpperCAmelCase =tuple(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) __UpperCAmelCase =get_pairs(__SCREAMING_SNAKE_CASE ) if not pairs: return token while True: __UpperCAmelCase =min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float("""inf""" ) ) ) if 
bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase =bigram __UpperCAmelCase =[] __UpperCAmelCase =0 while i < len(__SCREAMING_SNAKE_CASE ): try: __UpperCAmelCase =word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase =j if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase =tuple(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =new_word if len(__SCREAMING_SNAKE_CASE ) == 1: break else: __UpperCAmelCase =get_pairs(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""@@ """.join(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =word[:-4] __UpperCAmelCase =word return word def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any ) -> Tuple: __UpperCAmelCase =[] __UpperCAmelCase =re.findall(R"""\S+\n?""" , __SCREAMING_SNAKE_CASE ) for token in words: split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(""" """ ) ) ) return split_tokens def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple ) -> str: return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]: return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token ) def _a ( self : str , __SCREAMING_SNAKE_CASE : int ) -> List[str]: __UpperCAmelCase =""" """.join(__SCREAMING_SNAKE_CASE ).replace("""@@ """ , """""" ).strip() return out_string def _a ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase =os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + """\n""" ) __UpperCAmelCase =0 with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) __UpperCAmelCase =token_index writer.write(""" """.join(__SCREAMING_SNAKE_CASE ) + """\n""" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
68
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
694
0
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
69
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
0
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
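A hedged usage sketch for the watermark class above: it expects a float image batch in [-1, 1] of shape (batch, channels, height, width) and needs the invisible-watermark package installed. The input tensor here is synthetic, purely for illustration.

import torch  # assumes the class definition above is in scope

watermarker = StableDiffusionXLWatermarker()
images = torch.zeros(1, 3, 512, 512)  # synthetic batch in [-1, 1]
watermarked = watermarker.apply_watermark(images)
print(watermarked.shape)  # torch.Size([1, 3, 512, 512])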
70
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
694
0
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = checkpoint UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Tuple = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ : Union[str, Any] = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ : Dict = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ : Tuple = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ : Dict = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ : List[str] = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ : Optional[int] = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ : int = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ : int = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ : Optional[int] = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ : Dict = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ : Optional[int] = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ : int = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ : Tuple = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ : Any = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ : Optional[int] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ : Tuple = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ : List[str] = { layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_SCREAMING_SNAKE_CASE ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ : int = len({".".join(layer.split("." 
)[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ : Optional[Any] = { layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(_SCREAMING_SNAKE_CASE ) } for i in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Any = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key] if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: UpperCAmelCase_ : Dict = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.weight''' ) UpperCAmelCase_ : Any = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.bias''' ) UpperCAmelCase_ : List[str] = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = {"old": F'''down.{i}.block''', "new": F'''down_blocks.{i}.resnets'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ : Dict = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ : List[str] = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key] UpperCAmelCase_ : List[Any] = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCAmelCase_ : str = renew_vae_attention_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) conv_attn_to_linear(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Any = num_up_blocks - 1 - i UpperCAmelCase_ : Any = [ key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key ] if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: UpperCAmelCase_ : Any = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.weight''' ] UpperCAmelCase_ : Dict = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.bias''' ] UpperCAmelCase_ : List[str] = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = {"old": F'''up.{block_id}.block''', "new": F'''up_blocks.{i}.resnets'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ : Union[str, Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ : Dict = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key] UpperCAmelCase_ : str = renew_vae_resnet_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ : 
Any = renew_vae_attention_paths(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE ) conv_attn_to_linear(_SCREAMING_SNAKE_CASE ) return new_checkpoint def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , ) -> int: """simple docstring""" UpperCAmelCase_ : int = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ : Optional[Any] = io.BytesIO(r.content ) UpperCAmelCase_ : Union[str, Any] = OmegaConf.load(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = 5_12 UpperCAmelCase_ : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ : List[Any] = {} with safe_open(_SCREAMING_SNAKE_CASE , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ : Any = f.get_tensor(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase_ : Tuple = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )["state_dict"] # Convert the VAE model. UpperCAmelCase_ : List[Any] = create_vae_diffusers_config(_SCREAMING_SNAKE_CASE , image_size=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = custom_convert_ldm_vae_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = AutoencoderKL(**_SCREAMING_SNAKE_CASE ) vae.load_state_dict(_SCREAMING_SNAKE_CASE ) vae.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") _lowerCamelCase = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
71
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
694
0
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
72
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Toggle the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` of `number` as 0 or 1."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
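A few hand-checkable examples for the single-bit helpers above (0b1010 is decimal 10):

assert set_bit(0b1010, 0) == 0b1011    # 11: bit 0 turned on
assert clear_bit(0b1010, 1) == 0b1000  # 8: bit 1 turned off
assert flip_bit(0b1010, 3) == 0b0010   # 2: bit 3 toggled
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0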
694
0
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal equivalents of common decimal numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
73
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Sum of absolute coordinate differences between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as manhattan_distance, written as a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
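A worked example for the distance functions above: |1 - 2| + |1 - 2| = 2, and |1.5 - 6| + |1.5 - 6| = 9.

assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1.5, 1.5], [6, 6]) == 9.0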
694
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
from __future__ import annotations


class IIRFilter:
    """N-order infinite impulse response (IIR) filter in direct form I."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] and the stored input/output history."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
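A minimal sketch of driving the filter above. With a = b = [1.0, 0.0, 0.0] the difference equation reduces to y[n] = x[n], so the filter passes samples through unchanged.

filt = IIRFilter(2)
filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
print(filt.process(0.5))  # 0.5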
694
0
'''simple docstring''' import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = OpenAIGPTTokenizer lowerCAmelCase__ = OpenAIGPTTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = False def lowercase_ ( self : List[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : str = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] UpperCAmelCase__ : int = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : str = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(_A ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(_A ) ) def lowercase_ ( self : int , _A : List[Any] ): '''simple docstring''' return "lower newer", "lower newer" def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : Any = '''lower''' UpperCAmelCase__ : List[Any] = ['''low''', '''er</w>'''] UpperCAmelCase__ : List[str] = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : int = tokens + ['''<unk>'''] UpperCAmelCase__ : int = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A ) def lowercase_ ( self : List[Any] , _A : List[str]=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # Simple input UpperCAmelCase__ : Any = '''This is a simple input''' UpperCAmelCase__ : Tuple = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase__ : List[Any] = ('''This is a simple input''', '''This is a pair''') UpperCAmelCase__ : Union[str, Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) def lowercase_ ( self : Dict ): '''simple docstring''' pass @require_ftfy 
@require_spacy @require_tokenizers class lowerCamelCase_ ( __a ): pass
75
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
694
0
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = 1.0e4 , __UpperCamelCase = False , __UpperCamelCase = 1.0 , ): assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" __lowercase : Dict = float(embedding_dim // 2 ) __lowercase : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __lowercase : List[Any] = min_timescale * jnp.exp(jnp.arange(__UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment ) __lowercase : Any = jnp.expand_dims(__UpperCamelCase , 1 ) * jnp.expand_dims(__UpperCamelCase , 0 ) # scale embeddings __lowercase : Optional[int] = scale * emb if flip_sin_to_cos: __lowercase : Any = jnp.concatenate([jnp.cos(__UpperCamelCase ), jnp.sin(__UpperCamelCase )] , axis=1 ) else: __lowercase : List[str] = jnp.concatenate([jnp.sin(__UpperCamelCase ), jnp.cos(__UpperCamelCase )] , axis=1 ) __lowercase : int = jnp.reshape(__UpperCamelCase , [jnp.shape(__UpperCamelCase )[0], embedding_dim] ) return signal class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =jnp.floataa @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCamelCase_ ) __lowercase : str = nn.silu(UpperCamelCase_ ) __lowercase : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCamelCase_ ) return temb class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =False UpperCamelCase =1 @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: return get_sinusoidal_embeddings( UpperCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
76
"""Sum of the decimal digits of n! (Project Euler style helper)."""
from math import factorial


def solution(n: int = 100) -> int:
    # map ``int`` over the digits of n! and sum them, e.g. solution(10) == 27
    return sum(map(int, str(factorial(n))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
694
0
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
"""Naive O(len(s) * len(pattern)) substring search returning all match positions."""


def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
694
0
"""Haversine distance on the WGS84 ellipsoid, using reduced latitudes."""
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Flattening of the ellipsoid and reduced (parametric) latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
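# Usage sketch (added): metres between two assumed coordinates, San Francisco
# (37.774856, -122.424227) and Yosemite (37.864742, -119.537521); roughly 254 km.
if __name__ == "__main__":
    print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))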
78
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
0
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : Optional[int] = """""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = """""" SCREAMING_SNAKE_CASE__ : Any = """""" SCREAMING_SNAKE_CASE__ : List[Any] = 1 # (0 is vertical, 1 is horizontal) def _lowerCamelCase ( ) -> None: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = get_dataset(__lowerCamelCase , __lowerCamelCase ) print("""Processing...""" ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = update_image_and_anno(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) for index, image in enumerate(__lowerCamelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCAmelCase__ : Tuple = random_chars(32 ) UpperCAmelCase__ : List[str] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] UpperCAmelCase__ : Optional[Any] = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cva.imwrite(F"/{file_root}.jpg" , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"Success {index+1}/{len(__lowerCamelCase )} with {file_name}" ) UpperCAmelCase__ : str = [] for anno in new_annos[index]: UpperCAmelCase__ : List[str] = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(__lowerCamelCase ) with open(F"/{file_root}.txt" , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> tuple[list, list]: '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Dict = [] for label_file in glob.glob(os.path.join(__lowerCamelCase , """*.txt""" ) ): UpperCAmelCase__ : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(__lowerCamelCase ) as in_file: UpperCAmelCase__ : Tuple = in_file.readlines() UpperCAmelCase__ : Optional[Any] = os.path.join(__lowerCamelCase , F"{label_name}.jpg" ) UpperCAmelCase__ : List[str] = [] for obj_list in obj_lists: UpperCAmelCase__ : Optional[int] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCamelCase ) labels.append(__lowerCamelCase ) return img_paths, labels def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1 ) -> tuple[list, list, list]: '''simple docstring''' UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Tuple = [] for idx in range(len(__lowerCamelCase ) ): UpperCAmelCase__ : str = [] UpperCAmelCase__ : str = img_list[idx] path_list.append(__lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = anno_list[idx] UpperCAmelCase__ : Any = cva.imread(__lowerCamelCase ) if flip_type == 1: UpperCAmelCase__ : str = cva.flip(__lowerCamelCase , __lowerCamelCase ) for bbox in img_annos: UpperCAmelCase__ : Tuple = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: UpperCAmelCase__ : int = cva.flip(__lowerCamelCase , __lowerCamelCase ) for bbox in img_annos: UpperCAmelCase__ : Any = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCamelCase ) new_imgs_list.append(__lowerCamelCase ) return new_imgs_list, new_annos_lists, path_list def _lowerCamelCase ( __lowerCamelCase = 32 ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" UpperCAmelCase__ : int = ascii_lowercase + digits return 
"".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) if __name__ == "__main__": main() print("""DONE ✅""")
79
"""Dynamic-programming subset sum: can a subset of ``arr`` add up to ``required_sum``?"""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
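# Usage sketch (added): 9 is reachable as 4 + 5, while 30 exceeds every subset
# that omits 34 and cannot include it.
if __name__ == "__main__":
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # expected: True
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # expected: False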
694
0
def cocktail_shaker_sort(unsorted):
    """Bidirectional bubble sort: sweep right-to-left, then left-to-right."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: bubble the minimum towards the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # forward pass: bubble the maximum towards the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
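# Usage sketch (added): duplicates are handled like any other key, and the
# early exit fires as soon as a full double pass makes no swap.
if __name__ == "__main__":
    print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # expected: [1, 2, 2, 4, 5]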
80
"""Project Euler 2: sum the even-valued Fibonacci terms that do not exceed n."""


def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
694
0
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : int , lowerCamelCase : List[Any] , lowerCamelCase : str ) -> Optional[int]: __snake_case : Optional[Any] = params __snake_case : Union[str, Any] = np.array(lowerCamelCase ) __snake_case : List[Any] = np.array([len(lowerCamelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Dict , lowerCamelCase : Optional[int] ) -> Optional[Any]: return (self.token_ids[index], self.lengths[index]) def __len__( self : Union[str, Any] ) -> Tuple: return len(self.lengths ) def __snake_case ( self : Any ) -> int: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __snake_case ( self : Dict ) -> Dict: __snake_case : Optional[Any] = self.params.max_model_input_size __snake_case : str = self.lengths > max_len logger.info(F'Splitting {sum(lowerCamelCase )} too long sequences.' ) def divide_chunks(lowerCamelCase : List[Any] , lowerCamelCase : Any ): return [l[i : i + n] for i in range(0 , len(lowerCamelCase ) , lowerCamelCase )] __snake_case : List[Any] = [] __snake_case : Dict = [] if self.params.mlm: __snake_case , __snake_case : List[str] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: __snake_case , __snake_case : Optional[int] = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __snake_case : Union[str, Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __snake_case : int = np.insert(lowerCamelCase , 0 , lowerCamelCase ) if sub_s[-1] != sep_id: __snake_case : List[str] = np.insert(lowerCamelCase , len(lowerCamelCase ) , lowerCamelCase ) assert len(lowerCamelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowerCamelCase ) new_tok_ids.extend(lowerCamelCase ) new_lengths.extend([len(lowerCamelCase ) for l in sub_seqs] ) __snake_case : Tuple = np.array(lowerCamelCase ) __snake_case : Union[str, Any] = np.array(lowerCamelCase ) def __snake_case ( self : Optional[Any] ) -> int: __snake_case : Optional[Any] = len(self ) __snake_case : List[Any] = self.lengths > 11 __snake_case : Optional[Any] = self.token_ids[indices] __snake_case : List[Any] = self.lengths[indices] __snake_case : Any = len(self ) logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' ) def __snake_case ( self : Tuple ) -> List[str]: if "unk_token" not in self.params.special_tok_ids: return else: __snake_case : Optional[int] = self.params.special_tok_ids["unk_token"] __snake_case : Tuple = len(self ) __snake_case : Dict = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __snake_case : Dict = (unk_occs / self.lengths) < 0.5 __snake_case : Optional[int] = self.token_ids[indices] __snake_case : Optional[Any] = self.lengths[indices] __snake_case : List[str] = len(self ) logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' 
) def __snake_case ( self : Optional[Any] ) -> Optional[int]: if not self.params.is_master: return logger.info(F'{len(self )} sequences' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __snake_case ( self : str , lowerCamelCase : Optional[Any] ) -> str: __snake_case : str = [t[0] for t in batch] __snake_case : str = [t[1] for t in batch] assert len(lowerCamelCase ) == len(lowerCamelCase ) # Max for paddings __snake_case : List[Any] = max(lowerCamelCase ) # Pad token ids if self.params.mlm: __snake_case : Tuple = self.params.special_tok_ids["pad_token"] else: __snake_case : str = self.params.special_tok_ids["unk_token"] __snake_case : int = [list(t.astype(lowerCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(lowerCamelCase )) for t in token_ids] assert len(tk_ ) == len(lowerCamelCase ) assert all(len(lowerCamelCase ) == max_seq_len_ for t in tk_ ) __snake_case : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __snake_case : List[str] = torch.tensor(lowerCamelCase ) # (bs) return tk_t, lg_t
81
"""Project Euler 25: index of the first Fibonacci term with n digits."""


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
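# Usage sketch (added): the first Fibonacci term with three digits is
# F(12) = 144 under this indexing, so solution(3) returns 12.
if __name__ == "__main__":
    print(solution(3))  # expected: 12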
694
0
"""simple docstring""" import fire from utils import calculate_rouge, save_json def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ): UpperCAmelCase_ = [x.strip() for x in open(lowerCAmelCase__ ).readlines()] UpperCAmelCase_ = [x.strip() for x in open(lowerCAmelCase__ ).readlines()][: len(lowerCAmelCase__ )] UpperCAmelCase_ = calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) if save_path is not None: save_json(lowerCAmelCase__ , lowerCAmelCase__ , indent=lowerCAmelCase__ ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
82
"""Convert a TensorFlow RemBERT checkpoint to a PyTorch model."""
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
694
0
"""simple docstring""" from torch import nn def snake_case_ ( A_ : int ): '''simple docstring''' if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'''Unsupported activation function: {act_fn}''' )
83
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
694
0
import math


def prime_sieve(n):
    """Sieve of Eratosthenes: list of primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit=999_966_663_333):
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
84
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
694
0
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : int = "ybelkada/fonts" def _a ( ): '''simple docstring''' if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ''' 'Pix2StructImageProcessor. Please upgrade torch.' ) def _a ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] , lowercase__ : Any ): '''simple docstring''' requires_backends(lowercase__ , ['torch'] ) _check_torch_version() SCREAMING_SNAKE_CASE__ : Optional[int] = image_tensor.unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.nn.functional.unfold(lowercase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) SCREAMING_SNAKE_CASE__ : Optional[int] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowercase__ , lowercase__ , -1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def _a ( lowercase__ : str , lowercase__ : int = 36 , lowercase__ : str = "black" , lowercase__ : str = "white" , lowercase__ : int = 5 , lowercase__ : int = 5 , lowercase__ : int = 5 , lowercase__ : int = 5 , lowercase__ : Optional[bytes] = None , lowercase__ : Optional[str] = None , ): '''simple docstring''' requires_backends(lowercase__ , 'vision' ) # Add new lines so that each line is no more than 80 characters. SCREAMING_SNAKE_CASE__ : List[str] = textwrap.TextWrapper(width=80 ) SCREAMING_SNAKE_CASE__ : int = wrapper.wrap(text=lowercase__ ) SCREAMING_SNAKE_CASE__ : str = '\n'.join(lowercase__ ) if font_bytes is not None and font_path is None: SCREAMING_SNAKE_CASE__ : str = io.BytesIO(lowercase__ ) elif font_path is not None: SCREAMING_SNAKE_CASE__ : int = font_path else: SCREAMING_SNAKE_CASE__ : Tuple = hf_hub_download(lowercase__ , 'Arial.TTF' ) SCREAMING_SNAKE_CASE__ : Dict = ImageFont.truetype(lowercase__ , encoding='UTF-8' , size=lowercase__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. SCREAMING_SNAKE_CASE__ : Any = ImageDraw.Draw(Image.new('RGB' , (1, 1) , lowercase__ ) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = temp_draw.textbbox((0, 0) , lowercase__ , lowercase__ ) # Create the actual image with a bit of padding around the text. 
SCREAMING_SNAKE_CASE__ : Optional[int] = text_width + left_padding + right_padding SCREAMING_SNAKE_CASE__ : str = text_height + top_padding + bottom_padding SCREAMING_SNAKE_CASE__ : Tuple = Image.new('RGB' , (image_width, image_height) , lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ImageDraw.Draw(lowercase__ ) draw.text(xy=(left_padding, top_padding) , text=lowercase__ , fill=lowercase__ , font=lowercase__ ) return image def _a ( lowercase__ : np.ndarray , lowercase__ : str , **lowercase__ : Union[str, Any] ): '''simple docstring''' requires_backends(lowercase__ , 'vision' ) # Convert to PIL image if necessary SCREAMING_SNAKE_CASE__ : int = to_pil_image(lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = render_text(lowercase__ , **lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = max(header_image.width , image.width ) SCREAMING_SNAKE_CASE__ : int = int(image.height * (new_width / image.width) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = int(header_image.height * (new_width / header_image.width) ) SCREAMING_SNAKE_CASE__ : int = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary SCREAMING_SNAKE_CASE__ : List[str] = to_numpy_array(lowercase__ ) if infer_channel_dimension_format(lowercase__ ) == ChannelDimension.LAST: SCREAMING_SNAKE_CASE__ : Dict = to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) return new_image class snake_case ( UpperCamelCase_ ): lowercase_ = ['flattened_patches'] def __init__( self : List[Any] , a_ : bool = True , a_ : bool = True , a_ : Dict[str, int] = None , a_ : int = 2048 , a_ : bool = False , **a_ : List[Any] , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : Tuple = patch_size if patch_size is not None else {'height': 16, 'width': 16} SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_normalize SCREAMING_SNAKE_CASE__ : Dict = do_convert_rgb SCREAMING_SNAKE_CASE__ : int = max_patches SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_vqa def __lowercase( self : Union[str, Any] , a_ : np.ndarray , a_ : int , a_ : dict , **a_ : Dict )-> np.ndarray: """simple docstring""" requires_backends(self.extract_flattened_patches , 'torch' ) _check_torch_version() # convert to torch SCREAMING_SNAKE_CASE__ : Any = to_channel_dimension_format(a_ , ChannelDimension.FIRST ) SCREAMING_SNAKE_CASE__ : Dict = torch.from_numpy(a_ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = patch_size['height'], patch_size['width'] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_image_size(a_ ) # maximize scale s.t. 
SCREAMING_SNAKE_CASE__ : Tuple = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) SCREAMING_SNAKE_CASE__ : int = max(min(math.floor(scale * image_height / patch_height ) , a_ ) , 1 ) SCREAMING_SNAKE_CASE__ : str = max(min(math.floor(scale * image_width / patch_width ) , a_ ) , 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = max(num_feasible_rows * patch_height , 1 ) SCREAMING_SNAKE_CASE__ : List[str] = max(num_feasible_cols * patch_width , 1 ) SCREAMING_SNAKE_CASE__ : str = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=a_ , antialias=a_ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE__ : Optional[int] = torch_extract_patches(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = patches.shape SCREAMING_SNAKE_CASE__ : Dict = patches_shape[1] SCREAMING_SNAKE_CASE__ : List[str] = patches_shape[2] SCREAMING_SNAKE_CASE__ : Optional[Any] = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE__ : int = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] SCREAMING_SNAKE_CASE__ : Any = torch.arange(a_ ).reshape([rows, 1] ).repeat(1 , a_ ).reshape([rows * columns, 1] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.arange(a_ ).reshape([1, columns] ).repeat(a_ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] SCREAMING_SNAKE_CASE__ : Dict = row_ids.to(torch.floataa ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE__ : Dict = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.pad(a_ , [0, 0, 0, max_patches - (rows * columns)] ).float() SCREAMING_SNAKE_CASE__ : int = to_numpy_array(a_ ) return result def __lowercase( self : Dict , a_ : np.ndarray , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Optional[int] )-> np.ndarray: """simple docstring""" if image.dtype == np.uinta: SCREAMING_SNAKE_CASE__ : Optional[int] = image.astype(np.floataa ) # take mean across the whole `image` SCREAMING_SNAKE_CASE__ : List[str] = np.mean(a_ ) SCREAMING_SNAKE_CASE__ : Dict = np.std(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = max(a_ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(a_ , mean=a_ , std=a_ , **a_ ) def __lowercase( self : Optional[Any] , a_ : ImageInput , a_ : Optional[str] = None , a_ : bool = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Dict[str, int]] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : str , )-> ImageInput: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE__ : str = patch_size if patch_size is not None else self.patch_size SCREAMING_SNAKE_CASE__ : List[str] = max_patches if max_patches is not None else self.max_patches SCREAMING_SNAKE_CASE__ : Any = self.is_vqa if kwargs.get('data_format' , a_ ) is not None: raise ValueError('data_format is not an accepted input as the outputs are ' ) 
SCREAMING_SNAKE_CASE__ : Any = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE__ : str = [convert_to_rgb(a_ ) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ : Tuple = [to_numpy_array(a_ ) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.' ) SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop('font_bytes' , a_ ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop('font_path' , a_ ) if isinstance(a_ , a_ ): SCREAMING_SNAKE_CASE__ : List[Any] = [header_text] * len(a_ ) SCREAMING_SNAKE_CASE__ : Any = [ render_header(a_ , header_text[i] , font_bytes=a_ , font_path=a_ ) for i, image in enumerate(a_ ) ] if do_normalize: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.normalize(image=a_ ) for image in images] # convert to torch tensor and permute SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ self.extract_flattened_patches(image=a_ , max_patches=a_ , patch_size=a_ ) for image in images ] # create attention mask in numpy SCREAMING_SNAKE_CASE__ : List[str] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] SCREAMING_SNAKE_CASE__ : List[str] = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=a_ ) return encoded_outputs
85
"""Solve n simultaneous linear equations in n unknowns by recursive elimination."""


def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(next_iteration)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
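# Usage sketch (added): x + 2y = 3 and 4x + 5y = 6 solve to x = -1, y = 2.
if __name__ == "__main__":
    print(solve_simultaneous([[1, 2, 3], [4, 5, 6]]))  # expected: [-1.0, 2.0]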
694
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a :Any = { 'configuration_blip': [ 'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlipConfig', 'BlipTextConfig', 'BlipVisionConfig', ], 'processing_blip': ['BlipProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[int] = ['BlipImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlipModel', 'BlipPreTrainedModel', 'BlipForConditionalGeneration', 'BlipForQuestionAnswering', 'BlipVisionModel', 'BlipTextModel', 'BlipForImageTextRetrieval', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Union[str, Any] = [ 'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBlipModel', 'TFBlipPreTrainedModel', 'TFBlipForConditionalGeneration', 'TFBlipForQuestionAnswering', 'TFBlipVisionModel', 'TFBlipTextModel', 'TFBlipForImageTextRetrieval', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys __a :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
86
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__: Dict = logging.get_logger(__name__) A__: Optional[int] = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = "markuplm" def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _a : Any =vocab_size _a : List[str] =hidden_size _a : List[str] =num_hidden_layers _a : Tuple =num_attention_heads _a : Union[str, Any] =hidden_act _a : Tuple =intermediate_size _a : Optional[Any] =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : Any =max_position_embeddings _a : List[Any] =type_vocab_size _a : List[Any] =initializer_range _a : List[Any] =layer_norm_eps _a : Optional[int] =position_embedding_type _a : List[Any] =use_cache _a : List[str] =classifier_dropout # additional properties _a : int =max_depth _a : Union[str, Any] =max_xpath_tag_unit_embeddings _a : str =max_xpath_subs_unit_embeddings _a : int =tag_pad_id _a : List[Any] =subs_pad_id _a : str =xpath_unit_hidden_size
694
0
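The code cell in the row above is transformers-style lazy-import boilerplate: `_import_structure` maps submodule names to the symbols they export, and `_LazyModule` defers the real imports until an attribute is first touched. A minimal standard-library sketch of that idea, with illustrative names rather than the transformers internals:

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute back to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value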
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup _lowerCamelCase : Optional[int] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , **UpperCAmelCase__ : Optional[int]) ->Optional[int]: '''simple docstring''' requires_backends(self , ['''bs4''']) super().__init__(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Dict) ->Optional[Any]: '''simple docstring''' A__ = [] A__ = [] A__ = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag A__ = parent.find_all(child.name , recursive=UpperCAmelCase__) xpath_tags.append(child.name) xpath_subscripts.append( 0 if 1 == len(UpperCAmelCase__) else next(i for i, s in enumerate(UpperCAmelCase__ , 1) if s is child)) A__ = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict) ->Optional[int]: '''simple docstring''' A__ = BeautifulSoup(UpperCAmelCase__ , '''html.parser''') A__ = [] A__ = [] A__ = [] for element in html_code.descendants: if type(UpperCAmelCase__) == bsa.element.NavigableString: if type(element.parent) != bsa.element.Tag: continue A__ = html.unescape(UpperCAmelCase__).strip() if not text_in_this_tag: continue all_doc_strings.append(UpperCAmelCase__) A__ , A__ = self.xpath_soup(UpperCAmelCase__) stringaxtag_seq.append(UpperCAmelCase__) stringaxsubs_seq.append(UpperCAmelCase__) if len(UpperCAmelCase__) != len(UpperCAmelCase__): raise ValueError('''Number of doc strings and xtags does not correspond''') if len(UpperCAmelCase__) != len(UpperCAmelCase__): raise ValueError('''Number of doc strings and xsubs does not correspond''') return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int]) ->Optional[Any]: '''simple docstring''' A__ = '''''' for tagname, subs in zip(UpperCAmelCase__ , UpperCAmelCase__): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self : Optional[Any] , UpperCAmelCase__ : Tuple) ->BatchFeature: '''simple docstring''' A__ = False # Check that strings has a valid type if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = True elif isinstance(UpperCAmelCase__ , (list, tuple)): if len(UpperCAmelCase__) == 0 or isinstance(html_strings[0] , UpperCAmelCase__): A__ = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(UpperCAmelCase__)}.""") A__ = bool(isinstance(UpperCAmelCase__ , (list, tuple)) and (isinstance(html_strings[0] , UpperCAmelCase__))) if not is_batched: A__ = [html_strings] # Get nodes + xpaths A__ = [] A__ = [] for html_string in html_strings: A__ , A__ , A__ = self.get_three_from_single(UpperCAmelCase__) nodes.append(UpperCAmelCase__) A__ = [] for node, tag_list, sub_list in zip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__): A__ = self.construct_xpath(UpperCAmelCase__ , UpperCAmelCase__) xpath_strings.append(UpperCAmelCase__) xpaths.append(UpperCAmelCase__) # return as Dict A__ = {'''nodes''': nodes, '''xpaths''': xpaths} A__ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__) return encoded_inputs
87
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
694
0
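The feature extractor in this row walks each text node's ancestors to build an XPath; its `bsa` / `is_bsa_available` identifiers are garbled forms of `bs4` / BeautifulSoup. A small sketch of the same XPath construction against the real bs4 API (the helper name is made up):

from bs4 import BeautifulSoup


def xpath_for(element):
    """Build an absolute XPath for a bs4 tag by walking up through its parents."""
    parts = []
    child = element if element.name else element.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        # identity check rather than ==, since bs4 tags compare equal by content
        position = next(i for i, s in enumerate(siblings, 1) if s is child)
        parts.append(child.name if len(siblings) == 1 else f"{child.name}[{position}]")
        child = parent
    return "/" + "/".join(reversed(parts))


soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
print(xpath_for(soup.find_all("p")[1]))  # /html/body/p[2]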
"""simple docstring""" import torch from transformers import AutoModel class lowercase__ ( torch.nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased") -> str: super(SCREAMING_SNAKE_CASE , self).__init__() _lowerCamelCase : Union[str, Any] = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = torch.nn.CosineSimilarity(3 , 1e-0_8) _lowerCamelCase : Optional[int] = torch.nn.Softmax(dim=1) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> str: return self.bert(**SCREAMING_SNAKE_CASE).last_hidden_state def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[Any]: return token_embeddings.sum(2 , keepdim=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1) -> Union[str, Any]: return self.softmax(T * self.cos(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : str = W_supports["""sizes"""].tolist() _lowerCamelCase : int = W_supports["""start_token_id"""].item() _lowerCamelCase : str = W_supports["""end_token_id"""].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _lowerCamelCase : List[str] = self.BERT(**SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = self.BERT(**SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = None _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = W_supports["""input_ids"""] == start_token_id _lowerCamelCase : Any = W_supports["""input_ids"""] == end_token_id for i, size in enumerate(SCREAMING_SNAKE_CASE): if i == 0: _lowerCamelCase : List[str] = 0 else: _lowerCamelCase : Dict = support_sizes[i - 1] _lowerCamelCase : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]] _lowerCamelCase : Any = S[s : s + size][end_token_masks[s : s + size]] _lowerCamelCase : Any = torch.matmul(q[i] , s_start.T).sum(1).softmax(0) _lowerCamelCase : List[str] = torch.matmul(q[i] , s_end.T).sum(1).softmax(0) if p_starts is not None: _lowerCamelCase : str = torch.vstack((p_starts, p_start)) _lowerCamelCase : Optional[Any] = torch.vstack((p_ends, p_end)) else: _lowerCamelCase : Optional[Any] = p_start _lowerCamelCase : int = p_end return p_starts, p_ends
88
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
694
0
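The style-context cell in this row runs the push-relabel solver on the example graph [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]. As an independent sanity check, a plain Edmonds-Karp sketch (BFS augmenting paths) gives the same maximum flow, 6:

from collections import deque


def max_flow(capacity, source, sink):
    n = len(capacity)
    residual = [row[:] for row in capacity]
    total = 0
    while True:
        # BFS for an augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: done
            return total
        # find the bottleneck along the path, then push that much flow
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        total += bottleneck


print(max_flow([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))  # 6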
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json" ), "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json" ), } class _lowerCamelCase( _a ): lowercase_ : Dict = """dpr""" def __init__( self, lowerCamelCase=3_05_22, lowerCamelCase=7_68, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=30_72, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=1E-12, lowerCamelCase=0, lowerCamelCase="absolute", lowerCamelCase = 0, **lowerCamelCase, ) -> Tuple: """simple docstring""" super().__init__(pad_token_id=lowerCamelCase, **lowerCamelCase) _lowercase : Optional[Any] = vocab_size _lowercase : Optional[int] = hidden_size _lowercase : Tuple = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : Tuple = hidden_act _lowercase : Union[str, Any] = intermediate_size _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : int = attention_probs_dropout_prob _lowercase : List[str] = max_position_embeddings _lowercase : Optional[int] = type_vocab_size _lowercase : List[str] = initializer_range _lowercase : Dict = layer_norm_eps _lowercase : str = projection_dim _lowercase : int = position_embedding_type
89
'''simple docstring''' A__: Optional[int] = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] A__: int = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
694
0
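The DPR cell in this row follows the standard `PretrainedConfig` subclass pattern: declare a `model_type`, accept hyperparameters in `__init__`, and inherit JSON (de)serialization. A minimal sketch with invented class and field names:

from transformers import PretrainedConfig


class TinyRetrieverConfig(PretrainedConfig):
    model_type = "tiny-retriever"

    def __init__(self, vocab_size=30522, hidden_size=768, projection_dim=0, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim


config = TinyRetrieverConfig(projection_dim=128)
config.save_pretrained("./tiny-retriever")                    # writes config.json
reloaded = TinyRetrieverConfig.from_pretrained("./tiny-retriever")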
'''simple docstring''' import math import sys def _snake_case ( A ) -> str: lowerCAmelCase__ = '''''' try: with open(A , '''rb''' ) as binary_file: lowerCAmelCase__ = binary_file.read() for dat in data: lowerCAmelCase__ = F"""{dat:08b}""" result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def _snake_case ( A ) -> str: lowerCAmelCase__ = {'''0''': '''0''', '''1''': '''1'''} lowerCAmelCase__ , lowerCAmelCase__ = '''''', '''''' lowerCAmelCase__ = len(A ) for i in range(len(A ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue lowerCAmelCase__ = lexicon[curr_string] result += last_match_id lowerCAmelCase__ = last_match_id + '''0''' if math.loga(A ).is_integer(): lowerCAmelCase__ = {} for curr_key in list(A ): lowerCAmelCase__ = lexicon.pop(A ) lowerCAmelCase__ = new_lex lowerCAmelCase__ = last_match_id + '''1''' index += 1 lowerCAmelCase__ = '''''' return result def _snake_case ( A , A ) -> None: lowerCAmelCase__ = 8 try: with open(A , '''wb''' ) as opened_file: lowerCAmelCase__ = [ to_write[i : i + byte_length] for i in range(0 , len(A ) , A ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(A , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def _snake_case ( A ) -> str: lowerCAmelCase__ = 0 for letter in data_bits: if letter == "1": break counter += 1 lowerCAmelCase__ = data_bits[counter:] lowerCAmelCase__ = data_bits[counter + 1 :] return data_bits def _snake_case ( A , A ) -> None: lowerCAmelCase__ = read_file_binary(A ) lowerCAmelCase__ = remove_prefix(A ) lowerCAmelCase__ = decompress_data(A ) write_file_binary(A , A ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
90
'''simple docstring''' def price_plus_tax ( price : float ,tax_rate : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"{price_plus_tax(100, 0.25) = }") print(F"{price_plus_tax(125.50, 0.05) = }")
694
0
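The compression cell above first turns the input bytes into a bit string (`f"{byte:08b}"` per byte) and later strips a header prefix up to and including the first '1' bit. A quick sanity check of those two helpers in isolation:

def to_bits(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)


def strip_prefix(bits: str) -> str:
    return bits[bits.index("1") + 1 :]


bits = to_bits(b"\x05A")                 # 00000101 01000001
assert bits == "0000010101000001"
assert strip_prefix(bits) == "0101000001"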
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _lowercase = [ {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''}, {'''dataset''': '''snli''', '''config_name''': '''plain_text'''}, {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''}, {'''dataset''': '''wiki40b''', '''config_name''': '''en'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''}, {'''dataset''': '''natural_questions''', '''config_name''': '''default'''}, ] def _snake_case ( snake_case__ : str=True ): if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = None _lowerCamelCase: str = None def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Tuple ) -> Any: with TemporaryDirectory() as tmp_dir: A = dataset_module_factory(A_ ,cache_dir=A_ ) A = import_main_class(dataset_module.module_path ,dataset=A_ ) A = builder_cls( cache_dir=A_ ,config_name=A_ ,hash=dataset_module.hash ,) A = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A_ ).replace(os.sep ,'/' ), config.DATASET_INFO_FILENAME, ] ) A = cached_path(A_ ,cache_dir=A_ ) self.assertTrue(os.path.exists(A_ ) ) @pytest.mark.integration def _snake_case ( snake_case__ : Optional[int] ): A = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' A = dataset_module_factory('wikipedia' , cache_dir=snake_case__ ) A = import_main_class(dataset_module.module_path ) A = builder_cls( cache_dir=snake_case__ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam A = None builder_instance.download_and_prepare() A = builder_instance.as_dataset() assert ds @pytest.mark.integration def _snake_case ( snake_case__ : List[Any] ): A = dataset_module_factory('wikipedia' , cache_dir=snake_case__ ) A = import_main_class(dataset_module.module_path , dataset=snake_case__ ) A = builder_cls( cache_dir=snake_case__ , config_name='20220301.frr' , hash=dataset_module.hash , ) A = builder_instance.as_streaming_dataset() assert ds assert isinstance(snake_case__ , snake_case__ ) assert "train" in ds assert 
isinstance(ds['train'] , snake_case__ ) assert next(iter(ds['train'] ) )
91
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
0
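The test in this row is driven by `absl.testing.parameterized.named_parameters`, which turns each dict (keyed by `testcase_name`) into one named test case. A self-contained sketch of that mechanism with made-up case data:

from absl.testing import parameterized


CASES = [
    {"testcase_name": "wikipedia/20220301.simple", "dataset": "wikipedia", "config_name": "20220301.simple"},
    {"testcase_name": "snli/plain_text", "dataset": "snli", "config_name": "plain_text"},
]


class DatasetNameTest(parameterized.TestCase):
    @parameterized.named_parameters(CASES)
    def test_names_are_wellformed(self, dataset, config_name):
        self.assertTrue(dataset and config_name)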
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : str ): '''simple docstring''' # test for the above condition self.test() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =0 lowercase : List[Any] =False while not completed: if counter == 1: self.reset() lowercase : Dict =self.advance() if not self.does_advance(UpperCAmelCase__ ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) lowercase , lowercase , lowercase : str =self.update(UpperCAmelCase__ ) counter += 1 if counter > 10000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def lowerCamelCase_ ( self : Any ): '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int ): '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ): '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False ): '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : int , UpperCAmelCase__ : List[int] ): '''simple docstring''' super(UpperCAmelCase__ , self ).__init__() if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) lowercase : Union[str, Any] =token_ids lowercase : Tuple =len(self.token_ids ) lowercase : Tuple =-1 # the index of the currently fulfilled step lowercase : int =False def lowerCamelCase_ ( self : int ): '''simple docstring''' if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int ): '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int ): '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' ) lowercase : Any =False lowercase : Optional[int] =False lowercase : int =False if self.does_advance(UpperCAmelCase__ ): self.fulfilled_idx += 1 lowercase : Any =True if self.fulfilled_idx == (self.seqlen - 1): lowercase : List[str] =True lowercase : List[Any] =completed else: # failed to make progress. 
lowercase : Union[str, Any] =True self.reset() return stepped, completed, reset def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : int =False lowercase : Tuple =0 def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return self.seqlen - (self.fulfilled_idx + 1) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Dict =PhrasalConstraint(self.token_ids ) if stateful: lowercase : Union[str, Any] =self.seqlen lowercase : Optional[Any] =self.fulfilled_idx lowercase : List[str] =self.completed return new_constraint class __SCREAMING_SNAKE_CASE : def __init__( self : Any , UpperCAmelCase__ : List[List[int]] , UpperCAmelCase__ : Dict=True ): '''simple docstring''' lowercase : str =max([len(UpperCAmelCase__ ) for one in nested_token_ids] ) lowercase : Any ={} for token_ids in nested_token_ids: lowercase : Any =root for tidx, token_id in enumerate(UpperCAmelCase__ ): if token_id not in level: lowercase : int ={} lowercase : Union[str, Any] =level[token_id] if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F''' {nested_token_ids}.''' ) lowercase : Dict =root def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : Any =self.trie for current_token in current_seq: lowercase : List[str] =start[current_token] lowercase : Optional[Any] =list(start.keys() ) return next_tokens def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Union[str, Any] =self.next_tokens(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) == 0 def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] ): '''simple docstring''' lowercase : Any =list(root.values() ) if len(UpperCAmelCase__ ) == 0: return 1 else: return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[str] =self.count_leaves(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) != leaf_count class __SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self : List[str] , UpperCAmelCase__ : List[List[int]] ): '''simple docstring''' super(UpperCAmelCase__ , self ).__init__() if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or len(UpperCAmelCase__ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) lowercase : Any =DisjunctiveTrie(UpperCAmelCase__ ) lowercase : Tuple =nested_token_ids lowercase : Dict =self.trie.max_height lowercase : List[str] =[] lowercase : str =False def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Any =self.trie.next_tokens(self.current_seq ) if len(UpperCAmelCase__ ) == 0: return None else: return token_list def lowerCamelCase_ ( self : int , UpperCAmelCase__ : int ): '''simple docstring''' if not isinstance(UpperCAmelCase__ , 
UpperCAmelCase__ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' ) lowercase : Union[str, Any] =self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' ) lowercase : Any =False lowercase : Any =False lowercase : Optional[Any] =False if self.does_advance(UpperCAmelCase__ ): self.current_seq.append(UpperCAmelCase__ ) lowercase : Dict =True else: lowercase : Tuple =True self.reset() lowercase : Union[str, Any] =self.trie.reached_leaf(self.current_seq ) lowercase : int =completed return stepped, completed, reset def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : int =False lowercase : Tuple =[] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Any=False ): '''simple docstring''' lowercase : Dict =DisjunctiveConstraint(self.token_ids ) if stateful: lowercase : Union[str, Any] =self.seqlen lowercase : int =self.current_seq lowercase : Dict =self.completed return new_constraint class __SCREAMING_SNAKE_CASE : def __init__( self : int , UpperCAmelCase__ : List[Constraint] ): '''simple docstring''' lowercase : List[str] =constraints # max # of steps required to fulfill a given constraint lowercase : str =max([c.seqlen for c in constraints] ) lowercase : Any =len(UpperCAmelCase__ ) lowercase : Union[str, Any] =False self.init_state() def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[str] =[] lowercase : Optional[Any] =None lowercase : List[Any] =[constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints] def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =[] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" lowercase : List[str] =constraint.advance() if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): token_list.append(UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): token_list.extend(UpperCAmelCase__ ) else: lowercase : str =self.inprogress_constraint.advance() if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): token_list.append(UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): token_list.extend(UpperCAmelCase__ ) if len(UpperCAmelCase__ ) == 0: return None else: return token_list def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[List[int]] ): '''simple docstring''' self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint lowercase , lowercase : Optional[Any] =self.add(UpperCAmelCase__ ) # the entire list of constraints are fulfilled if self.completed: break def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : int ): '''simple docstring''' if not 
isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) lowercase , lowercase : List[Any] =False, False if self.completed: lowercase : Tuple =True lowercase : Union[str, Any] =False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state lowercase , lowercase , lowercase : Union[str, Any] =self.inprogress_constraint.update(UpperCAmelCase__ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) ) lowercase : List[str] =None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) lowercase : Optional[Any] =None if len(self.pending_constraints ) == 0: # we're done! lowercase : int =True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCAmelCase__ ): lowercase , lowercase , lowercase : List[Any] =pending_constraint.update(UpperCAmelCase__ ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(UpperCAmelCase__ ) lowercase : Union[str, Any] =None if not complete and stepped: lowercase : Any =pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". lowercase : int =( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. lowercase : List[Any] =True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Optional[int]=True ): '''simple docstring''' lowercase : int =ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: lowercase : Tuple =[ constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: lowercase : Dict =self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) lowercase : int =[constraint.copy() for constraint in self.pending_constraints] return new_state
92
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A__: Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : str=None ) -> Union[str, Any]: return field(default_factory=lambda: default ,metadata=_UpperCAmelCase ) @dataclass class A__ : __UpperCamelCase : List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) __UpperCamelCase : List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) __UpperCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} ) __UpperCamelCase : bool = field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) __UpperCamelCase : str = field( default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , ) __UpperCamelCase : str = field( default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , ) __UpperCamelCase : str = field( default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) __UpperCamelCase : str = field( default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , ) __UpperCamelCase : str = field( default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , ) __UpperCamelCase : int = field(default=3 , metadata={"help": "Times an experiment will be run."} ) __UpperCamelCase : bool = field( default=UpperCAmelCase__ , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' warnings.warn( f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , SCREAMING_SNAKE_CASE , ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
694
0
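The `DisjunctiveTrie` in this row is, underneath, a nested dict keyed by token ids: each constraint's token list carves a path from the root, and the keys at the current node are the admissible next tokens. A stripped-down sketch:

def build_trie(nested_token_ids):
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for token_id in token_ids:
            level = level.setdefault(token_id, {})
    return root


def next_tokens(trie, current_seq):
    level = trie
    for token_id in current_seq:
        level = level[token_id]
    return list(level.keys())


trie = build_trie([[1, 2, 3], [1, 2, 4], [5]])
print(next_tokens(trie, []))      # [1, 5]
print(next_tokens(trie, [1, 2]))  # [3, 4]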
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=4 , ): '''simple docstring''' lowerCAmelCase__ :int = parent lowerCAmelCase__ :Dict = batch_size lowerCAmelCase__ :Union[str, Any] = seq_length lowerCAmelCase__ :Union[str, Any] = is_training lowerCAmelCase__ :int = use_attention_mask lowerCAmelCase__ :Optional[int] = use_token_type_ids lowerCAmelCase__ :Any = use_labels lowerCAmelCase__ :Dict = vocab_size lowerCAmelCase__ :Optional[Any] = hidden_size lowerCAmelCase__ :Tuple = num_hidden_layers lowerCAmelCase__ :int = num_attention_heads lowerCAmelCase__ :List[Any] = intermediate_size lowerCAmelCase__ :List[Any] = hidden_act lowerCAmelCase__ :Optional[Any] = hidden_dropout_prob lowerCAmelCase__ :Tuple = attention_probs_dropout_prob lowerCAmelCase__ :Any = max_position_embeddings lowerCAmelCase__ :Optional[Any] = type_vocab_size lowerCAmelCase__ :int = type_sequence_label_size lowerCAmelCase__ :Union[str, Any] = initializer_range lowerCAmelCase__ :str = num_choices def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :str = None if self.use_attention_mask: lowerCAmelCase__ :List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ :List[str] = None if self.use_token_type_ids: lowerCAmelCase__ :Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ :Any = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = config_and_inputs lowerCAmelCase__ :Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Any 
= True __magic_name__ :List[Any] = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = FlaxRoFormerModelTester(self ) @slow def snake_case ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: lowerCAmelCase__ :Dict = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__UpperCAmelCase ) @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Any = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) lowerCAmelCase__ :Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase__ :Tuple = model(__UpperCAmelCase )[0] lowerCAmelCase__ :Any = 5_0_0_0_0 lowerCAmelCase__ :int = (1, 6, vocab_size) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ :List[Any] = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
93
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class A__ ( UpperCAmelCase__ ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Distribution , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :List[Any]=0 ) -> List[str]: '''simple docstring''' _a : int =1.0 if scale is None else scale _a : Optional[Any] =0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] ) @property def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return self.base_dist.mean * self.scale + self.loc @property def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' return self.base_dist.variance * self.scale**2 @property def __UpperCAmelCase ( self :Any ) -> List[str]: '''simple docstring''' return self.variance.sqrt() class A__ ( nn.Module ): def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Callable[..., Tuple[torch.Tensor]] , **SCREAMING_SNAKE_CASE :Dict ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE ) _a : Tuple =args_dim _a : Tuple =nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] ) _a : Dict =domain_map def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Tuple[torch.Tensor]: '''simple docstring''' _a : Tuple =[proj(SCREAMING_SNAKE_CASE ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE ) class A__ ( nn.Module ): def __init__( self :Dict , SCREAMING_SNAKE_CASE :Tuple ) -> int: '''simple docstring''' super().__init__() _a : List[Any] =function def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[int] , *SCREAMING_SNAKE_CASE :int ) -> List[Any]: '''simple docstring''' return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) class A__ : __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__( self :Any , SCREAMING_SNAKE_CASE :int = 1 ) -> None: '''simple docstring''' _a : Any =dim _a : List[Any] ={k: dim * self.args_dim[k] for k in self.args_dim} def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]: '''simple docstring''' if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , ) -> Distribution: '''simple docstring''' _a : str =self._base_distribution(SCREAMING_SNAKE_CASE ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim ) @property def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' return () if self.dim == 1 else (self.dim,) @property def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return len(self.event_shape ) @property def __UpperCAmelCase ( self :Any ) -> float: '''simple docstring''' return 0.0 def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :int ) -> 
nn.Module: '''simple docstring''' return ParameterProjection( in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __UpperCAmelCase ( self :int , *SCREAMING_SNAKE_CASE :torch.Tensor ) -> Any: '''simple docstring''' raise NotImplementedError() @staticmethod def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :torch.Tensor ) -> torch.Tensor: '''simple docstring''' return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0 class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def __UpperCAmelCase ( cls :int , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Union[str, Any]: '''simple docstring''' _a : Tuple =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) _a : Optional[Any] =2.0 + cls.squareplus(SCREAMING_SNAKE_CASE ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Dict: '''simple docstring''' _a : List[str] =cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :torch.Tensor ) -> Optional[int]: '''simple docstring''' _a : int =cls.squareplus(SCREAMING_SNAKE_CASE ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Distribution: '''simple docstring''' _a , _a : Any =distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None ) -> Distribution: '''simple docstring''' _a , _a : Optional[int] =distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
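The static method at the end of the base output class implements the squareplus map f(x) = (x + sqrt(x^2 + 4)) / 2, which turns an unconstrained real projection into a strictly positive parameter (scale, degrees of freedom, total count). A minimal standalone sketch of that property (the name `squareplus` is ours; the method above is anonymized):

import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # Smooth, monotonic map from R onto (0, inf); squareplus(0.0) == 1.0.
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

print(squareplus(torch.tensor([-5.0, 0.0, 5.0])))  # every output is strictly positive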
694
0
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))

pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))

dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
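Assuming the script above is saved as, say, `text2images.py` (the filename is not given here), it would be run along the lines of `python text2images.py -m ./sd-model-dir -c "robotic cat with wings" -n 4 -s 42`. When a `best_model.pt` exists in the model directory (presumably a UNet quantized with Intel Neural Compressor, given the `neural_compressor` import), it is loaded in place of the FP32 UNet.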
94
def set_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 1.
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    # Set the bit at `position` to 0.
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    # Toggle the bit at `position`.
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    # Return True if the bit at `position` is 1.
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    # Return 1 if the bit at `position` is set, else 0.
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
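A quick worked check of the five helpers above, using the de-obfuscated names, on n = 0b1010 (decimal 10):

n = 0b1010
assert set_bit(n, 0) == 0b1011    # 11: bit 0 turned on
assert clear_bit(n, 1) == 0b1000  # 8: bit 1 turned off
assert flip_bit(n, 3) == 0b0010   # 2: bit 3 toggled off
assert is_bit_set(n, 1) is True   # bit 1 of 0b1010 is 1
assert get_bit(n, 0) == 0         # bit 0 of 0b1010 is 0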
694
0
"""simple docstring""" import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets lowerCamelCase_ = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' lowerCamelCase_ = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' lowerCamelCase_ = r''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ (datasets.Metric ): def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = 0.0 for i, j in zip(lowerCAmelCase_ , lowerCAmelCase_ ): n_correct += 1.0 if math_equivalence.is_equiv(lowerCAmelCase_ , lowerCAmelCase_ ) else 0.0 UpperCAmelCase_ : Any = n_correct / len(lowerCAmelCase_ ) return { "accuracy": accuracy, }
95
def manhattan_distance(point_a: list, point_b: list) -> float:
    # Distance is the sum of coordinate-wise absolute differences.
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
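For example, for the points (1, 1) and (9, 6) the distance is |1 - 9| + |1 - 6| = 8 + 5 = 13, and both implementations above agree:

assert manhattan_distance([1, 1], [9, 6]) == 13.0
assert manhattan_distance_one_liner([1, 1], [9, 6]) == 13.0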
694
0
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __lowerCamelCase = tuple[int, int] class __A : def __init__( self : Tuple , __snake_case : set[int] , __snake_case : Mapping[EdgeT, int] ) -> None: __magic_name__: set[int] = vertices __magic_name__: dict[EdgeT, int] = { (min(__snake_case ), max(__snake_case )): weight for edge, weight in edges.items() } def lowerCamelCase__ ( self : List[Any] , __snake_case : EdgeT , __snake_case : int ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __magic_name__: int = weight def lowerCamelCase__ ( self : Tuple ) -> Graph: __magic_name__: Graph = Graph({min(self.vertices )} , {} ) __magic_name__: EdgeT __magic_name__: int __magic_name__: EdgeT __magic_name__: int while len(subgraph.vertices ) < len(self.vertices ): __magic_name__: Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __magic_name__: List[Any] = edge __magic_name__: Optional[Any] = weight subgraph.add_edge(__snake_case , __snake_case ) return subgraph def a ( __UpperCAmelCase : str = "p107_network.txt" ) -> int: __magic_name__: str = os.path.abspath(os.path.dirname(__UpperCAmelCase ) ) __magic_name__: str = os.path.join(__UpperCAmelCase , __UpperCAmelCase ) __magic_name__: dict[EdgeT, int] = {} __magic_name__: list[str] __magic_name__: int __magic_name__: int with open(__UpperCAmelCase ) as f: __magic_name__: Optional[int] = f.read().strip().split("""\n""" ) __magic_name__: List[str] = [line.split(""",""" ) for line in data] for edgea in range(1 , len(__UpperCAmelCase ) ): for edgea in range(__UpperCAmelCase ): if adjaceny_matrix[edgea][edgea] != "-": __magic_name__: List[str] = int(adjaceny_matrix[edgea][edgea] ) __magic_name__: Graph = Graph(set(range(len(__UpperCAmelCase ) ) ) , __UpperCAmelCase ) __magic_name__: Graph = graph.prims_algorithm() __magic_name__: int = sum(graph.edges.values() ) __magic_name__: int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f'''{solution() = }''')
96
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)
        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the histories by one sample.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
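As a usage sketch (the coefficients are illustrative, not taken from a filter design): with a_coeffs = [1.0, -0.5] and b_coeffs = [0.5, 0.0] the class realizes y[n] = 0.5*x[n] + 0.5*y[n-1], a first-order low-pass whose step response creeps up towards 1.

filt = IIRFilter(1)
filt.set_coefficients([1.0, -0.5], [0.5, 0.0])
out = [filt.process(x) for x in (1.0, 1.0, 1.0, 1.0)]
assert out == [0.5, 0.75, 0.875, 0.9375]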
694
0
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def a ( snake_case__: Any ): '''simple docstring''' lowercase_ = filter(lambda snake_case__ : p.requires_grad , model.parameters() ) lowercase_ = sum([np.prod(p.size() ) for p in model_parameters] ) return params __a = logging.getLogger(__name__) def a ( snake_case__: int , snake_case__: Any ): '''simple docstring''' if metric == "rouge2": lowercase_ = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": lowercase_ = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": lowercase_ = '''{val_avg_em:.4f}-{step_count}''' elif metric == "loss": lowercase_ = '''{val_avg_loss:.4f}-{step_count}''' else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ''' function.''' ) lowercase_ = ModelCheckpoint( dirpath=snake_case__ , filename=snake_case__ , monitor=F'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def a ( snake_case__: str , snake_case__: List[str] ): '''simple docstring''' return EarlyStopping( monitor=F'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=snake_case__ , verbose=snake_case__ , ) class lowercase__( pl.Callback ): """simple docstring""" def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]: lowercase_ = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE_ ) @rank_zero_only def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : pl.Trainer , SCREAMING_SNAKE_CASE_ : pl.LightningModule , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple=True ) -> None: logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) lowercase_ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results lowercase_ = Path(pl_module.hparams.output_dir ) if type_path == "test": lowercase_ = od / '''test_results.txt''' lowercase_ = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowercase_ = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' lowercase_ = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , '''a+''' ) as writer: for key in sorted(SCREAMING_SNAKE_CASE_ ): if key in ["log", "progress_bar", "preds"]: continue lowercase_ = metrics[key] if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): lowercase_ = val.item() lowercase_ = f'''{key}: {val:.6f}\n''' writer.write(SCREAMING_SNAKE_CASE_ ) if not save_generations: return if "preds" in metrics: lowercase_ = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(SCREAMING_SNAKE_CASE_ ) @rank_zero_only def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]: try: lowercase_ = pl_module.model.model.num_parameters() except AttributeError: lowercase_ = pl_module.model.num_parameters() lowercase_ = count_trainable_parameters(SCREAMING_SNAKE_CASE_ ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : pl.Trainer , SCREAMING_SNAKE_CASE_ : pl.LightningModule ) -> Any: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''test''' ) @rank_zero_only def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : pl.Trainer , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
97
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            # Frontiers met: see whether the combined path improves the bound.
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
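With the adjacency lists above, the shortest E -> F distance is 3: E -> G -> F costs 2 + 1 = 3, beating E -> B -> C -> D -> F at cost 4.

assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3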
694
0
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : List[str] = 'new-model' if is_tf_available(): class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : List[Any] = NewModelConfig @require_tf class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : Any ) -> int: '''simple docstring''' _UpperCamelCase = '''bert-base-cased''' _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : Dict ) -> Tuple: '''simple docstring''' _UpperCamelCase = '''bert-base-cased''' _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : str ) -> 
str: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' for model_name in ["bert-base-uncased"]: _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' for model_name in ["bert-base-uncased"]: _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow @require_tensorflow_probability def snake_case__ ( self : int ) -> Union[str, Any]: '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _UpperCamelCase = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase , _UpperCamelCase = TFAutoModelForTableQuestionAnswering.from_pretrained( lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(model.num_parameters() , 14410 ) 
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 14410 ) def snake_case__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 14410 ) def snake_case__ ( self : Any ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = copy.deepcopy(model.config ) _UpperCamelCase = ['''FunnelBaseModel'''] _UpperCamelCase = TFAutoModel.from_config(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = TFAutoModel.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] ) -> str: '''simple docstring''' try: AutoConfig.register('''new-model''' , lowerCAmelCase__ ) _UpperCamelCase = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(lowerCAmelCase__ ): auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ ) auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCAmelCase__ ): auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API _UpperCamelCase = BertModelTester(self ).get_config() _UpperCamelCase = NewModelConfig(**tiny_config.to_dict() ) _UpperCamelCase = auto_class.from_config(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = auto_class.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def snake_case__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' with self.assertRaisesRegex( lowerCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ): _UpperCamelCase = TFAutoModel.from_pretrained('''bert-base''' ) def snake_case__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex( lowerCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _UpperCamelCase = TFAutoModel.from_pretrained(lowerCAmelCase__ , revision='''aaaaaa''' ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex( 
lowerCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def snake_case__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex(lowerCAmelCase__ , '''Use `from_pt=True` to load this model''' ): _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def snake_case__ ( self : Optional[Any] ) -> str: '''simple docstring''' _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: _UpperCamelCase = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _UpperCamelCase = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: _UpperCamelCase = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
98
from math import factorial


def solution(num: int = 100) -> int:
    # Sum of the digits in the decimal expansion of num!
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
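A quick check by hand: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.

assert solution(10) == 27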
694
0
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : """simple docstring""" def __init__( self , __A , __A=13 , __A=3 , __A=True , __A=True , __A=0.1 , __A=0.1 , __A=224 , __A=1000 , __A=[3, 3, 6, 4] , __A=[48, 56, 112, 220] , ): __a = parent __a = batch_size __a = num_channels __a = is_training __a = use_labels __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = num_labels __a = image_size __a = layer_depths __a = embed_dims def snake_case_ ( self ): __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = None if self.use_labels: __a = ids_tensor([self.batch_size] , self.num_labels ) __a = self.get_config() return config, pixel_values, labels def snake_case_ ( self ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__A , layer_scale_init_value=1E-5 , ) def snake_case_ ( self , __A , __A , __A ): __a = SwiftFormerModel(config=__A ) model.to(__A ) model.eval() __a = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def snake_case_ ( self , __A , __A , __A ): __a = self.num_labels __a = SwiftFormerForImageClassification(__A ) model.to(__A ) model.eval() __a = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __a = SwiftFormerForImageClassification(__A ) model.to(__A ) model.eval() __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_ ( self ): ((__a) , (__a) , (__a)) = self.prepare_config_and_inputs() __a = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( __A , __A , unittest.TestCase ): """simple docstring""" _lowerCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _lowerCamelCase = ( {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification} if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case_ ( self ): __a = SwiftFormerModelTester(self ) __a = ConfigTester( self , config_class=__A , has_text_modality=__A , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def snake_case_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use 
inputs_embeds""" ) def snake_case_ ( self ): pass def snake_case_ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__A ) __a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def snake_case_ ( self ): __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = model_class(__A ) __a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a = [*signature.parameters.keys()] __a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __A ) def snake_case_ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def snake_case_ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def snake_case_ ( self ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a = SwiftFormerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def snake_case_ ( self ): pass def snake_case_ ( self ): def check_hidden_states_output(__A , __A , __A ): __a = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): __a = model(**self._prepare_for_class(__A , __A ) ) __a = outputs.hidden_states __a = 8 self.assertEqual(len(__A ) , __A ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__A ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __a , __a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a = True check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a = True check_hidden_states_output(__A , __A , __A ) def snake_case_ ( self ): def _config_zero_init(__A ): __a = copy.deepcopy(__A ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__A , __A , 1E-10 ) if isinstance(getattr(__A , __A , __A ) , __A ): __a = _config_zero_init(getattr(__A , __A ) ) setattr(__A , __A , __A ) return configs_no_init __a , __a = self.model_tester.prepare_config_and_inputs_for_common() __a = _config_zero_init(__A ) for model_class in self.all_model_classes: __a = model_class(config=__A ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def snake_case_ ( self ): pass def a (): __a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case_ ( self ): return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow 
def snake_case_ ( self ): __a = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(__A ) __a = self.default_image_processor __a = prepare_img() __a = image_processor(images=__A , return_tensors="""pt""" ).to(__A ) # forward pass with torch.no_grad(): __a = model(**__A ) # verify the logits __a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __A ) __a = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
99
def naive_pattern_search(s: str, pattern: str) -> list:
    # Slide the pattern over the text and record every index where all
    # characters match.
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
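The scan above is O(len(s) * len(pattern)) in the worst case. A short cross-check against Python's own substring test:

text, pat = "ABAAABCDBBABCDDEBCABC", "ABC"
hits = naive_pattern_search(text, pat)
assert hits == [4, 10, 18]
assert all(text.startswith(pat, i) for i in hits)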
694
0
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin _A : Dict = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : Any = BartphoTokenizer lowerCamelCase__ : Tuple = False lowerCamelCase__ : Dict = True def lowercase_ ( self ): '''simple docstring''' super().setUp() SCREAMING_SNAKE_CASE__ = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] SCREAMING_SNAKE_CASE__ = dict(zip(A_ , range(len(A_ ) ) ) ) SCREAMING_SNAKE_CASE__ = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(f'''{token} {vocab_tokens[token]}\n''' ) SCREAMING_SNAKE_CASE__ = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self , **A_ ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **A_ ) def lowercase_ ( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = '''This is a là test''' SCREAMING_SNAKE_CASE__ = '''This is a<unk><unk> test''' return input_text, output_text def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ = '''This is a là test''' SCREAMING_SNAKE_CASE__ = '''▁This ▁is ▁a ▁l à ▁t est'''.split() SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
100
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase ): def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple: '''simple docstring''' _a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8} _a : int =parent _a : Optional[int] =batch_size _a : List[str] =num_channels _a : Optional[Any] =image_size _a : int =min_resolution _a : str =max_resolution _a : str =do_resize _a : Tuple =size _a : Tuple =do_normalize def __UpperCAmelCase ( self :Any ) -> int: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self :List[Any] ) -> List[Any]: '''simple docstring''' _a : Any =ImageGPTImageProcessingTester(self ) @property def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self :Dict ) -> Any: '''simple docstring''' _a : Optional[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) ) def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' _a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} ) _a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} ) def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) _a : Dict =json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) ) else: self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> Dict: '''simple docstring''' _a : List[Any] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a : Any 
=os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" ) image_processor_first.to_json_file(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict() _a : Tuple =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' _a : List[str] =self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE ) _a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict() _a : Union[str, Any] =image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: _a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" ) _a : Dict =Image.open(dataset[4]["""file"""] ) _a : Optional[int] =Image.open(dataset[5]["""file"""] ) _a : Optional[Any] =[imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) _a : int =prepare_images() # test non-batched _a : Dict =image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) ) _a : Optional[int] =[3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE ) # test batched _a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) ) _a : Any =[3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
694
0
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __lowercase : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = parent SCREAMING_SNAKE_CASE_ : int = batch_size SCREAMING_SNAKE_CASE_ : Tuple = seq_length SCREAMING_SNAKE_CASE_ : int = is_training SCREAMING_SNAKE_CASE_ : Any = use_input_mask SCREAMING_SNAKE_CASE_ : int = use_token_type_ids SCREAMING_SNAKE_CASE_ : Any = use_labels SCREAMING_SNAKE_CASE_ : List[str] = vocab_size SCREAMING_SNAKE_CASE_ : int = hidden_size SCREAMING_SNAKE_CASE_ : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Any = hidden_act SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : int = initializer_range SCREAMING_SNAKE_CASE_ : str = num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_choices SCREAMING_SNAKE_CASE_ : Tuple = scope def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ : List[Any] = None SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Optional[Any] = None SCREAMING_SNAKE_CASE_ : int = None if self.use_labels: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE_ : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowerCAmelCase__ , ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = FalconModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : int = FalconModel(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : Dict = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = FalconForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = FalconForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # first forward pass SCREAMING_SNAKE_CASE_ : str = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE_ : int = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE_ : Dict = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE_ : List[str] = model( 
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['hidden_states'][0] SCREAMING_SNAKE_CASE_ : Any = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['hidden_states'][0] # select random slice SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : Any = config_and_inputs SCREAMING_SNAKE_CASE_ : str = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowercase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" _UpperCAmelCase = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase = (FalconForCausalLM,) if is_torch_available() else () _UpperCAmelCase = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = FalconModelTester(self ) SCREAMING_SNAKE_CASE_ : Any = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: SCREAMING_SNAKE_CASE_ : Dict = alibi self.model_tester.create_and_check_model(lowerCAmelCase__ , *lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : int = 3 SCREAMING_SNAKE_CASE_ : str = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.ne(1 ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : int = 
FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Tuple = 3 SCREAMING_SNAKE_CASE_ : Optional[Any] = 'single_label_classification' SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ : Dict = input_ids.ne(1 ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : str = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Dict = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ : Dict = FalconForCausalLM(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : str = model(lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any = input_ids.shape[0] SCREAMING_SNAKE_CASE_ : Optional[int] = model._convert_to_rw_cache(result.past_key_values ) SCREAMING_SNAKE_CASE_ : Optional[int] = model._convert_cache_to_standard_format(lowerCAmelCase__ , lowerCAmelCase__ ) for layer in range(len(lowerCAmelCase__ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[Any] = 3 SCREAMING_SNAKE_CASE_ : Dict = 'multi_label_classification' SCREAMING_SNAKE_CASE_ : Dict = input_dict['input_ids'] SCREAMING_SNAKE_CASE_ : Tuple = input_ids.ne(1 ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE_ : Tuple = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(lowerCAmelCase__ , 'use_cache' ): return SCREAMING_SNAKE_CASE_ : Dict = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ ) if "use_cache" not in inputs: 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True SCREAMING_SNAKE_CASE_ : Any = model(**lowerCAmelCase__ ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return SCREAMING_SNAKE_CASE_ : Optional[Any] = ( getattr(lowerCAmelCase__ , 'decoder_layers' , lowerCAmelCase__ ) or getattr(lowerCAmelCase__ , 'num_decoder_layers' , lowerCAmelCase__ ) or config.num_hidden_layers ) SCREAMING_SNAKE_CASE_ : List[str] = getattr(lowerCAmelCase__ , 'num_kv_heads' , config.num_attention_heads ) SCREAMING_SNAKE_CASE_ : str = getattr(lowerCAmelCase__ , 'd_model' , config.hidden_size ) SCREAMING_SNAKE_CASE_ : List[str] = embed_dim // num_attention_heads SCREAMING_SNAKE_CASE_ : List[Any] = outputs['past_key_values'] self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs['input_ids'].shape for i in range(lowerCAmelCase__ ): if config.new_decoder_architecture: SCREAMING_SNAKE_CASE_ : Any = config.num_attention_heads elif config.multi_query: SCREAMING_SNAKE_CASE_ : Any = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class __lowercase (unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' ) model.eval() model.to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer('My favorite food is' , return_tensors='pt' ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = ( 'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.' 
) SCREAMING_SNAKE_CASE_ : Dict = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=1_9 ) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = FalconForCausalLM.from_pretrained(lowerCAmelCase__ ) model.eval() model.to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int = tokenizer('My favorite food is' , return_tensors='pt' ).to(lowerCAmelCase__ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 ) model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 ) model.generate(**lowerCAmelCase__ , num_beams=2 , max_new_tokens=4 ) @slow def UpperCamelCase__ ( self ): """simple docstring""" with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int = FalconForCausalLM.from_pretrained(lowerCAmelCase__ ) model.eval() model.to(device=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = tokenizer('My favorite food is' , return_tensors='pt' ).to(lowerCAmelCase__ ) # Test results are the same with and without cache SCREAMING_SNAKE_CASE_ : Optional[Any] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
101
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[int] ,_UpperCAmelCase : int ) -> bool:
    _a : Optional[int] =len(_UpperCAmelCase )
    _a : Tuple =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        _a : Any =True
    # sum is not zero and set is empty then false
    for i in range(1 ,required_sum + 1 ):
        _a : int =False
    for i in range(1 ,arr_len + 1 ):
        for j in range(1 ,required_sum + 1 ):
            if arr[i - 1] > j:
                _a : Optional[Any] =subset[i - 1][j]
            if arr[i - 1] <= j:
                _a : Union[str, Any] =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
694
0
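# NOTE: a minimal sketch of the subset-sum DP from the cell above, with readable
# names substituted for the obfuscated ones (`is_sum_subset` is illustrative, not
# a name taken from the dataset). dp[i][j] is True when some subset of the first
# i values sums to j.
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    n = len(arr)
    dp = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to zero
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    return dp[n][required_sum]


assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)       # 4 + 5
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)  # no subset reaches 30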
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : torch.FloatTensor __lowerCAmelCase : Optional[torch.FloatTensor] = None def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.9_99 , SCREAMING_SNAKE_CASE="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(SCREAMING_SNAKE_CASE ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(SCREAMING_SNAKE_CASE ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCamelCase : Any = [] for i in range(SCREAMING_SNAKE_CASE ): UpperCamelCase : int = i / num_diffusion_timesteps UpperCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE ) / alpha_bar_fn(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) return torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" @register_to_config def __init__( self , _A = 1_0_0_0 , _A = "fixed_small_log" , _A = True , _A = 1.0 , _A = "epsilon" , _A = "squaredcos_cap_v2" , ): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" ) UpperCamelCase : Optional[int] = betas_for_alpha_bar(_A ) UpperCamelCase : int = 1.0 - self.betas UpperCamelCase : Any = torch.cumprod(self.alphas , dim=0 ) UpperCamelCase : Any = torch.tensor(1.0 ) # standard deviation of the initial noise distribution UpperCamelCase : str = 1.0 # setable values UpperCamelCase : str = None UpperCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 , _A )[::-1].copy() ) UpperCamelCase : List[str] = variance_type def _a ( self , _A , _A = None ): '''simple docstring''' return sample def _a ( self , _A , _A = None ): '''simple docstring''' UpperCamelCase : List[Any] = num_inference_steps UpperCamelCase : int = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) UpperCamelCase : Tuple = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(np.intaa ) UpperCamelCase : Union[str, Any] = torch.from_numpy(_A ).to(_A ) def _a ( self , _A , _A=None , _A=None , _A=None ): '''simple docstring''' if prev_timestep is None: UpperCamelCase : Any = t - 1 UpperCamelCase : Tuple = self.alphas_cumprod[t] UpperCamelCase : str = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase : Tuple = 1 - alpha_prod_t UpperCamelCase : int = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase : Dict = self.betas[t] else: UpperCamelCase : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCamelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: UpperCamelCase : List[Any] = self.config.variance_type # hacks - were probably 
added for training stability if variance_type == "fixed_small_log": UpperCamelCase : Union[str, Any] = torch.log(torch.clamp(_A , min=1e-2_0 ) ) UpperCamelCase : List[str] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler UpperCamelCase : Union[str, Any] = variance.log() UpperCamelCase : Tuple = beta.log() UpperCamelCase : Any = (predicted_variance + 1) / 2 UpperCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def _a ( self , _A , _A , _A , _A = None , _A=None , _A = True , ): '''simple docstring''' UpperCamelCase : Optional[int] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": UpperCamelCase , UpperCamelCase : Dict = torch.split(_A , sample.shape[1] , dim=1 ) else: UpperCamelCase : Dict = None # 1. compute alphas, betas if prev_timestep is None: UpperCamelCase : Tuple = t - 1 UpperCamelCase : Tuple = self.alphas_cumprod[t] UpperCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t UpperCamelCase : Union[str, Any] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: UpperCamelCase : List[Any] = self.betas[t] UpperCamelCase : str = self.alphas[t] else: UpperCamelCase : List[str] = 1 - alpha_prod_t / alpha_prod_t_prev UpperCamelCase : List[str] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCamelCase : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCamelCase : int = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" """ for the UnCLIPScheduler.""" ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCamelCase : Dict = torch.clamp( _A , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase : Dict = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t UpperCamelCase : int = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise UpperCamelCase : Union[str, Any] = 0 if t > 0: UpperCamelCase : Any = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=_A , device=model_output.device ) UpperCamelCase : int = self._get_variance( _A , predicted_variance=_A , prev_timestep=_A , ) if self.variance_type == "fixed_small_log": UpperCamelCase : Dict = variance elif self.variance_type == "learned_range": UpperCamelCase : str = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" """ for the UnCLIPScheduler.""" ) UpperCamelCase : str = variance * variance_noise UpperCamelCase : List[str] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=_A , pred_original_sample=_A ) def _a ( self , _A , _A , _A , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) UpperCamelCase : List[str] = timesteps.to(original_samples.device ) UpperCamelCase : Optional[Any] = alphas_cumprod[timesteps] ** 0.5 UpperCamelCase : Optional[int] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase : Optional[int] = sqrt_alpha_prod.unsqueeze(-1 ) UpperCamelCase : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 UpperCamelCase : Dict = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): UpperCamelCase : Optional[int] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) UpperCamelCase : Optional[Any] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
102
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 4000000 ) -> int:
    _a : Optional[Any] =[]
    _a , _a : Union[str, Any] =0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(_UpperCAmelCase )
        _a , _a : Optional[Any] =b, a + b
    return sum(_UpperCAmelCase )


if __name__ == "__main__":
    print(F"{solution() = }")
694
0
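# NOTE: a minimal, standalone sketch of the "squaredcos_cap_v2" schedule built by
# the betas_for_alpha_bar helper in the UnCLIP scheduler cell above; the default
# 0.999 cap mirrors that helper, but `cosine_betas` is an illustrative name.
import math


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list[float]:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas


betas = cosine_betas(1000)
assert 0 < betas[0] < betas[-1] <= 0.999  # betas grow toward the cap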
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness snake_case = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' snake_case = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' snake_case = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' snake_case = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' snake_case = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): def __UpperCAmelCase ( self : Dict ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=[1, 1_0, 1_0_0] , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : int=3.0 ): """simple docstring""" if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('''This metric is currently not supported on Windows.''' ) with ThreadPoolExecutor(max_workers=__lowerCamelCase ) as executor: _snake_case = [] _snake_case = Counter() _snake_case = 0 _snake_case = defaultdict(__lowerCamelCase ) for task_id, (candidates, test_case) in enumerate(zip(__lowerCamelCase , __lowerCamelCase ) ): for candidate in candidates: _snake_case = candidate + '''\n''' + test_case _snake_case = (test_program, timeout, task_id, completion_id[task_id]) _snake_case = executor.submit(__lowerCamelCase , *__lowerCamelCase ) futures.append(__lowerCamelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__lowerCamelCase ): _snake_case = future.result() results[result["task_id"]].append((result['''completion_id'''], result) ) _snake_case , _snake_case = [], [] for result in results.values(): result.sort() _snake_case = [r[1]['''passed'''] for r in result] total.append(len(__lowerCamelCase ) ) correct.append(sum(__lowerCamelCase ) ) _snake_case = np.array(__lowerCamelCase ) _snake_case = np.array(__lowerCamelCase ) _snake_case 
= k _snake_case = {f"""pass@{k}""": estimate_pass_at_k(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: def estimator(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = itertools.repeat(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) else: assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) _snake_case = iter(lowerCAmelCase_ ) return np.array([estimator(int(lowerCAmelCase_ ) , int(lowerCAmelCase_ ) , lowerCAmelCase_ ) for n, c in zip(lowerCAmelCase_ , lowerCAmelCase_ )] )
103
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
    if n == 1 or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
        return 0
    elif n == 2:
        return 1
    else:
        _a : Dict =[0, 1]
        for i in range(2 ,n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> int:
    _a : Union[str, Any] =0
    _a : Optional[Any] =2
    while digits < n:
        index += 1
        _a : Optional[int] =len(str(fibonacci(_UpperCAmelCase ) ) )
    return index


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 1000 ) -> int:
    return fibonacci_digits_index(_UpperCAmelCase )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
694
0
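# NOTE: a minimal sketch of the unbiased pass@k estimator used by the code_eval
# metric cell above: pass@k = 1 - C(n - c, k) / C(n, k), computed as a
# numerically stable product over np.arange(n - c + 1, n + 1).
import numpy as np


def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0  # too few failing samples left to fill a k-sample draw
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))


assert pass_at_k(n=2, c=1, k=1) == 0.5  # one of two samples passes
assert pass_at_k(n=5, c=0, k=5) == 0.0  # nothing ever passes
assert pass_at_k(n=5, c=5, k=1) == 1.0  # every sample passes (n - c < k)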
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase = logging.get_logger(__name__) # General docstring UpperCamelCase = """RegNetConfig""" # Base docstring UpperCamelCase = """facebook/regnet-y-040""" UpperCamelCase = [1, 1088, 7, 7] # Image classification docstring UpperCamelCase = """facebook/regnet-y-040""" UpperCamelCase = """tabby, tabby cat""" UpperCamelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = "relu" , ) -> Any: super().__init__() A__ = nn.Convad( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=kernel_size // 2 , groups=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ , ) A__ = nn.BatchNormad(SCREAMING_SNAKE_CASE__ ) A__ = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: A__ = self.convolution(SCREAMING_SNAKE_CASE__ ) A__ = self.normalization(SCREAMING_SNAKE_CASE__ ) A__ = self.activation(SCREAMING_SNAKE_CASE__ ) return hidden_state class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ ) -> int: super().__init__() A__ = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) A__ = config.num_channels def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Tuple: A__ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) A__ = self.embedder(SCREAMING_SNAKE_CASE__ ) return hidden_state class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 2 ) -> int: super().__init__() A__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) A__ = nn.BatchNormad(SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Tensor: A__ = self.convolution(SCREAMING_SNAKE_CASE__ ) A__ = self.normalization(SCREAMING_SNAKE_CASE__ ) return hidden_state class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]: super().__init__() A__ = nn.AdaptiveAvgPoolad((1, 1) ) A__ = nn.Sequential( nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , nn.Sigmoid() , ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> int: # b c h w -> b c 1 1 A__ = self.pooler(SCREAMING_SNAKE_CASE__ ) A__ = self.attention(SCREAMING_SNAKE_CASE__ ) A__ = hidden_state * attention return hidden_state class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 ) -> Tuple: super().__init__() A__ = in_channels != out_channels or stride != 1 A__ = max(1 , out_channels // config.groups_width ) A__ = ( RegNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity() ) A__ = nn.Sequential( RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , ) A__ = ACTaFN[config.hidden_act] def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]: A__ = hidden_state A__ = self.layer(SCREAMING_SNAKE_CASE__ ) A__ = self.shortcut(SCREAMING_SNAKE_CASE__ ) hidden_state += residual A__ = self.activation(SCREAMING_SNAKE_CASE__ ) return hidden_state class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 ) -> str: super().__init__() A__ = in_channels != out_channels or stride != 1 A__ = max(1 , out_channels // config.groups_width ) A__ = ( RegNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity() ) A__ = nn.Sequential( RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , groups=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , RegNetSELayer(SCREAMING_SNAKE_CASE__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , ) A__ = ACTaFN[config.hidden_act] def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> Tuple: A__ = hidden_state A__ = 
self.layer(SCREAMING_SNAKE_CASE__ ) A__ = self.shortcut(SCREAMING_SNAKE_CASE__ ) hidden_state += residual A__ = self.activation(SCREAMING_SNAKE_CASE__ ) return hidden_state class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 2 , SCREAMING_SNAKE_CASE__ = 2 , ) -> Tuple: super().__init__() A__ = RegNetXLayer if config.layer_type == "x" else RegNetYLayer A__ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , ) , *[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(depth - 1 )] , ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: A__ = self.layers(SCREAMING_SNAKE_CASE__ ) return hidden_state class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: super().__init__() A__ = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) A__ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ): self.stages.append(RegNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ ) ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True ) -> BaseModelOutputWithNoAttention: A__ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A__ = hidden_states + (hidden_state,) A__ = stage_module(SCREAMING_SNAKE_CASE__ ) if output_hidden_states: A__ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ ) class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" A__ : Optional[Any] = RegNetConfig A__ : Tuple = "regnet" A__ : Tuple = "pixel_values" A__ : List[Any] = True def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> str: if isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Optional[Any]: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): A__ = value UpperCamelCase = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" UpperCamelCase = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , _lowerCAmelCase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: super().__init__(SCREAMING_SNAKE_CASE__ ) A__ = config A__ = RegNetEmbeddings(SCREAMING_SNAKE_CASE__ ) A__ = RegNetEncoder(SCREAMING_SNAKE_CASE__ ) A__ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ) -> BaseModelOutputWithPoolingAndNoAttention: A__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A__ = return_dict if return_dict is not None else self.config.use_return_dict A__ = self.embedder(SCREAMING_SNAKE_CASE__ ) A__ = self.encoder( SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ ) A__ = encoder_outputs[0] A__ = self.pooler(SCREAMING_SNAKE_CASE__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , _lowerCAmelCase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ ) -> Dict: super().__init__(SCREAMING_SNAKE_CASE__ ) A__ = config.num_labels A__ = RegNetModel(SCREAMING_SNAKE_CASE__ ) # classification head A__ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ) -> ImageClassifierOutputWithNoAttention: A__ = return_dict if return_dict is not None else self.config.use_return_dict A__ = self.regnet(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ ) A__ = outputs.pooler_output if return_dict else outputs[1] A__ = self.classifier(SCREAMING_SNAKE_CASE__ ) A__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A__ = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A__ = "single_label_classification" else: A__ = "multi_label_classification" if self.config.problem_type == "regression": A__ = MSELoss() if self.num_labels == 1: A__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: A__ = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.config.problem_type == "single_label_classification": A__ = CrossEntropyLoss() A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A__ = BCEWithLogitsLoss() A__ = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not return_dict: A__ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
104
'''simple docstring'''
import argparse

import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ) -> str:
    # Initialise PyTorch model
    _a : List[str] =RemBertConfig.from_json_file(_UpperCAmelCase )
    print("""Building PyTorch model from configuration: {}""".format(str(_UpperCAmelCase ) ) )
    _a : Dict =RemBertModel(_UpperCAmelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )

    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(_UpperCAmelCase ) )
    torch.save(model.state_dict() ,_UpperCAmelCase )


if __name__ == "__main__":
    A__: Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--rembert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained RemBERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    A__: Tuple = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
694
0
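# NOTE: a minimal, standalone sketch of the squeeze-and-excitation attention that
# the RegNetSELayer in the RegNet cell above applies (global-pool to (b, c, 1, 1),
# squeeze, excite, then rescale the input per channel); `SqueezeExcite` is an
# illustrative rename of that module's structure, not a class from the file.
import torch
from torch import nn


class SqueezeExcite(nn.Module):
    def __init__(self, channels: int, reduced_channels: int) -> None:
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        attention = self.attention(self.pooler(hidden_state))
        return hidden_state * attention  # broadcasts over height and width


hidden = torch.randn(2, 8, 7, 7)
assert SqueezeExcite(8, 2)(hidden).shape == hidden.shape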
import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="attention" ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = params[F'{prefix}/layers_{i}/{layer_name}/key/kernel'] SCREAMING_SNAKE_CASE_ : str = params[F'{prefix}/layers_{i}/{layer_name}/out/kernel'] SCREAMING_SNAKE_CASE_ : Optional[int] = params[F'{prefix}/layers_{i}/{layer_name}/query/kernel'] SCREAMING_SNAKE_CASE_ : Union[str, Any] = params[F'{prefix}/layers_{i}/{layer_name}/value/kernel'] return k, o, q, v def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple=False ) -> Dict: """simple docstring""" if split_mlp_wi: SCREAMING_SNAKE_CASE_ : Union[str, Any] = params[F'{prefix}/layers_{i}/mlp/wi_0/kernel'] SCREAMING_SNAKE_CASE_ : int = params[F'{prefix}/layers_{i}/mlp/wi_1/kernel'] SCREAMING_SNAKE_CASE_ : Any = (wi_a, wi_a) else: SCREAMING_SNAKE_CASE_ : int = params[F'{prefix}/layers_{i}/mlp/wi/kernel'] SCREAMING_SNAKE_CASE_ : str = params[F'{prefix}/layers_{i}/mlp/wo/kernel'] return wi, wo def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict ) -> Any: """simple docstring""" return params[F'{prefix}/layers_{i}/{layer_name}/scale'] def __UpperCAmelCase ( lowerCamelCase_ : dict , *, lowerCamelCase_ : int , lowerCamelCase_ : bool ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = traverse_util.flatten_dict(variables['target'] ) SCREAMING_SNAKE_CASE_ : str = {'/'.join(lowerCamelCase_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi SCREAMING_SNAKE_CASE_ : Tuple = 'encoder/layers_0/mlp/wi_0/kernel' in old print('Split MLP:' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = collections.OrderedDict() # Shared embeddings. SCREAMING_SNAKE_CASE_ : Union[str, Any] = old['token_embedder/embedding'] # Encoder. for i in range(lowerCamelCase_ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'pre_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'attention' ) SCREAMING_SNAKE_CASE_ : List[str] = layer_norm SCREAMING_SNAKE_CASE_ : List[Any] = k.T SCREAMING_SNAKE_CASE_ : Any = o.T SCREAMING_SNAKE_CASE_ : List[str] = q.T SCREAMING_SNAKE_CASE_ : List[Any] = v.T # Block i, layer 1 (MLP). 
SCREAMING_SNAKE_CASE_ : Dict = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = tax_mlp_lookup(lowerCamelCase_ , lowerCamelCase_ , 'encoder' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ : Optional[Any] = wi[0].T SCREAMING_SNAKE_CASE_ : str = wi[1].T else: SCREAMING_SNAKE_CASE_ : List[str] = wi.T SCREAMING_SNAKE_CASE_ : Tuple = wo.T SCREAMING_SNAKE_CASE_ : str = old[ 'encoder/relpos_bias/rel_embedding' ].T SCREAMING_SNAKE_CASE_ : Any = old['encoder/encoder_norm/scale'] if not is_encoder_only: # Decoder. for i in range(lowerCamelCase_ ): # Block i, layer 0 (Self Attention). SCREAMING_SNAKE_CASE_ : str = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_self_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'self_attention' ) SCREAMING_SNAKE_CASE_ : int = layer_norm SCREAMING_SNAKE_CASE_ : Any = k.T SCREAMING_SNAKE_CASE_ : Tuple = o.T SCREAMING_SNAKE_CASE_ : Optional[Any] = q.T SCREAMING_SNAKE_CASE_ : Any = v.T # Block i, layer 1 (Cross Attention). SCREAMING_SNAKE_CASE_ : Dict = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_cross_attention_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_attention_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'encoder_decoder_attention' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm SCREAMING_SNAKE_CASE_ : str = k.T SCREAMING_SNAKE_CASE_ : List[str] = o.T SCREAMING_SNAKE_CASE_ : Union[str, Any] = q.T SCREAMING_SNAKE_CASE_ : List[str] = v.T # Block i, layer 2 (MLP). SCREAMING_SNAKE_CASE_ : str = tax_layer_norm_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , 'pre_mlp_layer_norm' ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = tax_mlp_lookup(lowerCamelCase_ , lowerCamelCase_ , 'decoder' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ : Optional[int] = wi[0].T SCREAMING_SNAKE_CASE_ : Dict = wi[1].T else: SCREAMING_SNAKE_CASE_ : Optional[Any] = wi.T SCREAMING_SNAKE_CASE_ : Any = wo.T SCREAMING_SNAKE_CASE_ : Tuple = old['decoder/decoder_norm/scale'] SCREAMING_SNAKE_CASE_ : Optional[int] = old[ 'decoder/relpos_bias/rel_embedding' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: SCREAMING_SNAKE_CASE_ : Any = old['decoder/logits_dense/kernel'].T return new def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : bool ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: SCREAMING_SNAKE_CASE_ : str = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) SCREAMING_SNAKE_CASE_ : Dict = state_dict['shared.weight'] return state_dict def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = checkpoints.load_tax_checkpoint(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Dict = convert_tax_to_pytorch(lowerCamelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = make_state_dict(lowerCamelCase_ , lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : bool = False ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = TaConfig.from_json_file(lowerCamelCase_ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE_ : Tuple = TaForConditionalGeneration(lowerCamelCase_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(lowerCamelCase_ ) # Verify that we can load the checkpoint. model.from_pretrained(lowerCamelCase_ ) print('Done' ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) UpperCamelCase__ : Dict = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
105
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__: Optional[int] = logging.get_logger(__name__) A__: Union[str, Any] = '''▁''' A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} A__: Optional[int] = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024} class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token _a : int ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _a : Dict =vocab_file _a : int =monolingual_vocab_file _a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _a : List[Any] ={} _a : List[str] =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[Any] =cnt cnt += 1 with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _a : int =line.strip().split()[0] _a : str =len(self.fairseq_tokens_to_ids ) if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids: _a : Optional[int] =len(self.fairseq_tokens_to_ids ) _a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self :int ) -> List[Any]: '''simple docstring''' _a : Optional[int] =self.__dict__.copy() _a : Optional[Any] =None _a : str =self.sp_model.serialized_model_proto() return state def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str: '''simple docstring''' _a : List[str] =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple ={} _a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def 
__UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _a : Optional[int] =[self.cls_token_id] _a : int =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _a : List[str] =[self.sep_token_id] _a : int =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __UpperCAmelCase ( self :Optional[int] ) -> int: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __UpperCAmelCase ( self :int ) -> Union[str, Any]: '''simple docstring''' _a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]: '''simple docstring''' _a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip() return out_string def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a : int =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _a : Any =os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi: _a : Optional[Any] =self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE ) 
elif not os.path.isfile(self.monolingual_vocab_file ): with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" ) return out_vocab_file, out_monolingual_vocab_file
694
0
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
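A quick sanity check for the solver above, as a sketch with a hypothetical 2x2 strictly diagonally dominant system (the values are chosen for illustration and are not part of the sample):

import numpy as np

# Hypothetical system: 4x + y = 2, x + 3y = -6 (strictly diagonally dominant).
coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
constants = np.array([[2.0], [-6.0]])
initial_values = [0.5, -0.5]

# Converges toward x ~ 1.0909, y ~ -2.3636 after a few dozen iterations.
print(jacobi_iteration_method(coefficients, constants, initial_values, iterations=50))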
106
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
694
0
'''simple docstring''' from sklearn.metrics import fa_score import datasets _UpperCAmelCase : Dict = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' _UpperCAmelCase : str = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' _UpperCAmelCase : str = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ), reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'], ) def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : int, UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict=None, UpperCamelCase__ : Dict=1, UpperCamelCase__ : List[Any]="binary", UpperCamelCase__ : str=None ) -> Optional[int]: _A = fa_score( UpperCamelCase__, UpperCamelCase__, labels=UpperCamelCase__, pos_label=UpperCamelCase__, average=UpperCamelCase__, sample_weight=UpperCamelCase__ ) return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
107
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term, creating a "unit" matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
694
0
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    # The three headline counters on the page live in divs of this class.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
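Because covid_data is a namedtuple, the scraped counters can also be read by field name. A minimal sketch of a hypothetical session (it issues a live HTTP request, so the numbers vary):

stats = covid_stats()  # performs the GET request and parses the three counters
print(f"cases={stats.cases}, deaths={stats.deaths}, recovered={stats.recovered}")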
108
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__: Dict = logging.get_logger(__name__) A__: Optional[int] = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class A__ ( UpperCAmelCase__ ): __UpperCamelCase : Tuple = "markuplm" def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _a : Any =vocab_size _a : List[str] =hidden_size _a : List[str] =num_hidden_layers _a : Tuple =num_attention_heads _a : Union[str, Any] =hidden_act _a : Tuple =intermediate_size _a : Optional[Any] =hidden_dropout_prob _a : int =attention_probs_dropout_prob _a : Any =max_position_embeddings _a : List[Any] =type_vocab_size _a : List[Any] =initializer_range _a : List[Any] =layer_norm_eps _a : Optional[int] =position_embedding_type _a : List[Any] =use_cache _a : List[str] =classifier_dropout # additional properties _a : int =max_depth _a : Union[str, Any] =max_xpath_tag_unit_embeddings _a : str =max_xpath_subs_unit_embeddings _a : int =tag_pad_id _a : List[Any] =subs_pad_id _a : str =xpath_unit_hidden_size
694
0
'''simple docstring''' import argparse import struct import unittest class __a : def __init__( self : str ,lowerCamelCase : bytes ): '''simple docstring''' __SCREAMING_SNAKE_CASE = data # Initialize hash values __SCREAMING_SNAKE_CASE = [ 0x6A_09E_667, 0xBB_67A_E85, 0x3C_6EF_372, 0xA5_4FF_53A, 0x51_0E5_27F, 0x9B_056_88C, 0x1F_83D_9AB, 0x5B_E0C_D19, ] # Initialize round constants __SCREAMING_SNAKE_CASE = [ 0x42_8A2_F98, 0x71_374_491, 0xB5_C0F_BCF, 0xE9_B5D_BA5, 0x39_56C_25B, 0x59_F11_1F1, 0x92_3F8_2A4, 0xAB_1C5_ED5, 0xD8_07A_A98, 0x12_835_B01, 0x24_318_5BE, 0x55_0C7_DC3, 0x72_BE5_D74, 0x80_DEB_1FE, 0x9B_DC0_6A7, 0xC1_9BF_174, 0xE4_9B6_9C1, 0xEF_BE4_786, 0x0F_C19_DC6, 0x24_0CA_1CC, 0x2D_E92_C6F, 0x4A_748_4AA, 0x5C_B0A_9DC, 0x76_F98_8DA, 0x98_3E5_152, 0xA8_31C_66D, 0xB0_032_7C8, 0xBF_597_FC7, 0xC6_E00_BF3, 0xD5_A79_147, 0x06_CA6_351, 0x14_292_967, 0x27_B70_A85, 0x2E_1B2_138, 0x4D_2C6_DFC, 0x53_380_D13, 0x65_0A7_354, 0x76_6A0_ABB, 0x81_C2C_92E, 0x92_722_C85, 0xA2_BFE_8A1, 0xA8_1A6_64B, 0xC2_4B8_B70, 0xC7_6C5_1A3, 0xD1_92E_819, 0xD6_990_624, 0xF4_0E3_585, 0x10_6AA_070, 0x19_A4C_116, 0x1E_376_C08, 0x27_487_74C, 0x34_B0B_CB5, 0x39_1C0_CB3, 0x4E_D8A_A4A, 0x5B_9CC_A4F, 0x68_2E6_FF3, 0x74_8F8_2EE, 0x78_A56_36F, 0x84_C87_814, 0x8C_C70_208, 0x90_BEF_FFA, 0xA4_506_CEB, 0xBE_F9A_3F7, 0xC6_717_8F2, ] __SCREAMING_SNAKE_CASE = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCAmelCase__ ( lowerCamelCase : bytes ): '''simple docstring''' __SCREAMING_SNAKE_CASE = b"""\x80""" + (b"""\x00""" * (63 - (len(lowerCamelCase ) + 8) % 64)) __SCREAMING_SNAKE_CASE = struct.pack(""">Q""" ,(len(lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers __SCREAMING_SNAKE_CASE = list(struct.unpack(""">16L""" ,lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array __SCREAMING_SNAKE_CASE = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) __SCREAMING_SNAKE_CASE = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) __SCREAMING_SNAKE_CASE = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x100_000_000 # Compression __SCREAMING_SNAKE_CASE = self.ror(lowerCamelCase ,6 ) ^ self.ror(lowerCamelCase ,11 ) ^ self.ror(lowerCamelCase ,25 ) __SCREAMING_SNAKE_CASE = (e & f) ^ ((~e & 0xFF_FFF_FFF) & g) __SCREAMING_SNAKE_CASE = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100_000_000 __SCREAMING_SNAKE_CASE = self.ror(lowerCamelCase ,2 ) ^ self.ror(lowerCamelCase ,13 ) ^ self.ror(lowerCamelCase ,22 ) __SCREAMING_SNAKE_CASE = (a & b) ^ (a & c) ^ (b & c) __SCREAMING_SNAKE_CASE = (sa + maj) % 0x100_000_000 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( g, f, e, ((d + tempa) % 0x100_000_000), c, b, a, ((tempa + tempa) % 0x100_000_000), ) __SCREAMING_SNAKE_CASE = [a, b, c, d, e, f, g, 
h] # Modify final values __SCREAMING_SNAKE_CASE = [ ((element + mutated_hash_values[index]) % 0x100_000_000) for index, element in enumerate(self.hashes ) ] __SCREAMING_SNAKE_CASE = """""".join([hex(lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : int ,lowerCamelCase : int ): '''simple docstring''' return 0xFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations) class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' import hashlib __SCREAMING_SNAKE_CASE = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(lowerCamelCase ).hash ,hashlib.shaaaa(lowerCamelCase ).hexdigest() ) def __magic_name__ ( ) -> None: '''simple docstring''' import doctest doctest.testmod() __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: __SCREAMING_SNAKE_CASE = f.read() else: __SCREAMING_SNAKE_CASE = bytes(__UpperCAmelCase , """utf-8""" ) print(SHAaaa(__UpperCAmelCase ).hash ) if __name__ == "__main__": main()
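The ror method above is the 32-bit right rotation used throughout the SHA-256 compression loop. A standalone sketch of the same operation (hypothetical helper name), checkable against known rotations:

def rotr32(value: int, rotations: int) -> int:
    # Rotate a 32-bit word right: bits shifted out at the bottom re-enter at the top.
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))


# Rotating 0b1 right by one carries the set bit into the most significant position.
assert rotr32(0x00000001, 1) == 0x80000000
assert rotr32(0x12345678, 8) == 0x78123456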
109
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''') def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict: hf_model.apply_weight_norm() _a : Any =checkpoint["""input_conv.weight_g"""] _a : Union[str, Any] =checkpoint["""input_conv.weight_v"""] _a : Optional[int] =checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): _a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"] _a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"] _a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): _a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"] _a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"] _a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"] _a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"] _a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"] _a : Dict =checkpoint["""output_conv.1.weight_g"""] _a : str =checkpoint["""output_conv.1.weight_v"""] _a : Union[str, Any] =checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]: if config_path is not None: _a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: _a : str =SpeechTaHifiGanConfig() _a : Tuple =SpeechTaHifiGan(_UpperCAmelCase ) _a : int =torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase ) _a : Dict =np.load(_UpperCAmelCase ) _a : Union[str, Any] =stats[0].reshape(-1 ) _a : Any =stats[1].reshape(-1 ) _a : Tuple =torch.from_numpy(_UpperCAmelCase ).float() _a : List[str] =torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__: Optional[int] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__: Tuple = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
694
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : Dict = logging.get_logger(__name__) lowercase__ : Optional[Any] = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class __lowerCAmelCase ( UpperCAmelCase__ ): """simple docstring""" _snake_case : str = "gpt_bigcode" _snake_case : List[str] = ["past_key_values"] _snake_case : List[str] = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Any , lowerCAmelCase__ : Any=50257 , lowerCAmelCase__ : List[str]=1024 , lowerCAmelCase__ : str=768 , lowerCAmelCase__ : str=12 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Optional[Any]="gelu_pytorch_tanh" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Union[str, Any]=1e-5 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Any=50256 , lowerCAmelCase__ : Optional[int]=50256 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : List[Any]=True , **lowerCAmelCase__ : Union[str, Any] , ) -> int: '''simple docstring''' _UpperCamelCase = vocab_size _UpperCamelCase = n_positions _UpperCamelCase = n_embd _UpperCamelCase = n_layer _UpperCamelCase = n_head _UpperCamelCase = n_inner _UpperCamelCase = activation_function _UpperCamelCase = resid_pdrop _UpperCamelCase = embd_pdrop _UpperCamelCase = attn_pdrop _UpperCamelCase = layer_norm_epsilon _UpperCamelCase = initializer_range _UpperCamelCase = scale_attn_weights _UpperCamelCase = use_cache _UpperCamelCase = attention_softmax_in_fpaa _UpperCamelCase = scale_attention_softmax_in_fpaa _UpperCamelCase = multi_query _UpperCamelCase = bos_token_id _UpperCamelCase = eos_token_id super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
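The class above follows the standard transformers config pattern: every constructor argument is stored as a same-named instance attribute, which is what makes serialization to and from JSON possible. A minimal standalone sketch of that pattern (hypothetical class name and defaults, not the transformers implementation):

class MinimalConfig:
    def __init__(self, vocab_size: int = 50257, n_embd: int = 768, n_layer: int = 12):
        # Each constructor argument becomes a same-named attribute,
        # so the config can later be dumped or reloaded field by field.
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer


config = MinimalConfig(n_embd=128)
print(config.n_embd)  # 128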
98
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
694
0
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): return number | (1 << position) def _snake_case ( lowercase__ , lowercase__ ): return number & ~(1 << position) def _snake_case ( lowercase__ , lowercase__ ): return number ^ (1 << position) def _snake_case ( lowercase__ , lowercase__ ): return ((number >> position) & 1) == 1 def _snake_case ( lowercase__ , lowercase__ ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
630
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
694
0
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _UpperCAmelCase ( UpperCAmelCase__): def __snake_case ( self ) -> Any: '''simple docstring''' _UpperCAmelCase : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_A , """tf_padding""" ) ) self.parent.assertTrue(hasattr(_A , """depth_multiplier""" ) ) class _UpperCAmelCase : def __init__( self , _A , _A=13 , _A=3 , _A=32 , _A=0.25 , _A=8 , _A=8 , _A=6 , _A=32 , _A=True , _A=True , _A=True , _A="relu6" , _A=12_80 , _A=0.1 , _A=0.02 , _A=True , _A=True , _A=10 , _A=None , ) -> Any: '''simple docstring''' _UpperCAmelCase : Dict = parent _UpperCAmelCase : Union[str, Any] = batch_size _UpperCAmelCase : Union[str, Any] = num_channels _UpperCAmelCase : int = image_size _UpperCAmelCase : str = depth_multiplier _UpperCAmelCase : str = depth_divisible_by _UpperCAmelCase : int = min_depth _UpperCAmelCase : List[str] = expand_ratio _UpperCAmelCase : str = tf_padding _UpperCAmelCase : List[str] = output_stride _UpperCAmelCase : Union[str, Any] = first_layer_is_expansion _UpperCAmelCase : Any = finegrained_output _UpperCAmelCase : Optional[Any] = hidden_act _UpperCAmelCase : Tuple = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) _UpperCAmelCase : List[Any] = classifier_dropout_prob _UpperCAmelCase : str = use_labels _UpperCAmelCase : List[str] = is_training _UpperCAmelCase : Optional[int] = num_labels _UpperCAmelCase : Any = initializer_range _UpperCAmelCase : Union[str, Any] = scope def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase : List[str] = None _UpperCAmelCase : List[Any] = None if self.use_labels: _UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _UpperCAmelCase : List[Any] = self.get_config() return config, pixel_values, labels, pixel_labels def __snake_case ( self ) -> Union[str, Any]: '''simple docstring''' return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __snake_case ( self , _A , _A , _A , _A ) -> str: '''simple docstring''' _UpperCAmelCase : Optional[int] = 
MobileNetVaModel(config=_A ) model.to(_A ) model.eval() _UpperCAmelCase : Dict = model(_A ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def __snake_case ( self , _A , _A , _A , _A ) -> List[str]: '''simple docstring''' _UpperCAmelCase : int = self.num_labels _UpperCAmelCase : Tuple = MobileNetVaForImageClassification(_A ) model.to(_A ) model.eval() _UpperCAmelCase : int = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self , _A , _A , _A , _A ) -> Tuple: '''simple docstring''' _UpperCAmelCase : List[Any] = self.num_labels _UpperCAmelCase : List[Any] = MobileNetVaForSemanticSegmentation(_A ) model.to(_A ) model.eval() _UpperCAmelCase : int = model(_A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _UpperCAmelCase : Dict = model(_A , labels=_A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() _UpperCAmelCase : Union[str, Any] = config_and_inputs _UpperCAmelCase : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase): __a : Union[str, Any] = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) __a : int = ( { "feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification, "image-segmentation": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) __a : List[Any] = False __a : int = False __a : Tuple = False __a : Union[str, Any] = False def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : Tuple = MobileNetVaModelTester(self ) _UpperCAmelCase : Any = MobileNetVaConfigTester(self , config_class=_A , has_text_modality=_A ) def __snake_case ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" ) def __snake_case ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" ) def __snake_case ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason="""MobileNetV2 does not output attentions""" ) def __snake_case ( self ) -> int: '''simple docstring''' pass def __snake_case ( self ) -> Any: '''simple docstring''' _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : Dict = model_class(_A ) _UpperCAmelCase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()] _UpperCAmelCase : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A ) def __snake_case ( self ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __snake_case ( self ) -> int: '''simple docstring''' def check_hidden_states_output(_A , _A , _A ): _UpperCAmelCase : Dict = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): _UpperCAmelCase : Dict = model(**self._prepare_for_class(_A , _A ) ) _UpperCAmelCase : Dict = outputs.hidden_states _UpperCAmelCase : str = 16 self.assertEqual(len(_A ) , _A ) _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : List[Any] = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase : List[str] = True check_hidden_states_output(_A , _A , _A ) def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def __snake_case ( self ) -> str: '''simple docstring''' _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_A ) @slow def __snake_case ( self ) -> Tuple: '''simple docstring''' for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Union[str, Any] = MobileNetVaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def UpperCamelCase ( ) -> List[Any]: _UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase): @cached_property def __snake_case ( self ) -> Any: '''simple docstring''' return ( MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None ) @slow def __snake_case ( self ) -> str: '''simple docstring''' _UpperCAmelCase : Any = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(_A ) _UpperCAmelCase : List[str] = self.default_image_processor _UpperCAmelCase : List[str] = prepare_img() _UpperCAmelCase : List[str] = image_processor(images=_A , return_tensors="""pt""" ).to(_A ) # forward pass with torch.no_grad(): _UpperCAmelCase : str = model(**_A ) # verify the logits _UpperCAmelCase : Dict = torch.Size((1, 10_01) ) self.assertEqual(outputs.logits.shape , _A ) _UpperCAmelCase : Optional[int] = torch.tensor([0.2445, -1.1993, 0.1905] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) ) @slow def __snake_case ( self ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" ) _UpperCAmelCase : Any = model.to(_A ) _UpperCAmelCase : Optional[Any] = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" ) _UpperCAmelCase : str = prepare_img() _UpperCAmelCase : List[str] = image_processor(images=_A , return_tensors="""pt""" ).to(_A ) # forward pass with torch.no_grad(): _UpperCAmelCase : Any = model(**_A ) _UpperCAmelCase : Dict = outputs.logits # verify the logits _UpperCAmelCase : List[str] = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , _A ) _UpperCAmelCase : int = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], 
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=_A , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _A , atol=1e-4 ) )
238
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
694
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
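With this `__init__.py` in place, the submodule is only imported when one of its attributes is first touched. A sketch of how a caller sees it (assuming the package is installed as part of transformers):

# The import below is cheap: _LazyModule defers loading the tokenizer module
# until the attribute is actually accessed.
from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer

tokenizer_cls = Wav2Vec2PhonemeCTCTokenizer  # the real import happens here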
36
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class A__ ( unittest.TestCase ): def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any=1_3 , SCREAMING_SNAKE_CASE :Any=7 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Optional[Any]=9_9 , SCREAMING_SNAKE_CASE :Tuple=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=5 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :int=3_7 , SCREAMING_SNAKE_CASE :Optional[Any]="gelu" , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :Dict=5_1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[Any]=0.02 , SCREAMING_SNAKE_CASE :int=4 , ) -> Tuple: '''simple docstring''' _a : Optional[Any] =parent _a : List[str] =batch_size _a : List[str] =seq_length _a : List[Any] =is_training _a : Optional[int] =use_attention_mask _a : List[Any] =use_token_type_ids _a : List[Any] =use_labels _a : Optional[Any] =vocab_size _a : str =hidden_size _a : List[Any] =num_hidden_layers _a : List[Any] =num_attention_heads _a : Union[str, Any] =intermediate_size _a : int =hidden_act _a : List[str] =hidden_dropout_prob _a : Optional[int] =attention_probs_dropout_prob _a : Dict =max_position_embeddings _a : Any =type_vocab_size _a : str =type_sequence_label_size _a : str =initializer_range _a : List[str] =num_choices def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict: '''simple docstring''' _a : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a : Dict =None if self.use_attention_mask: _a : Any =random_attention_mask([self.batch_size, self.seq_length] ) _a : Optional[int] =None if self.use_token_type_ids: _a : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a : Union[str, Any] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCAmelCase ( self :Optional[Any] ) -> int: '''simple docstring''' _a : Tuple =self.prepare_config_and_inputs() _a , _a , _a , _a : List[Any] =config_and_inputs _a : Optional[int] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def 
__UpperCAmelCase ( self :int ) -> str: '''simple docstring''' _a : List[Any] =self.prepare_config_and_inputs() _a , _a , _a , _a : Optional[int] =config_and_inputs _a : Tuple =True _a : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _a : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class A__ ( UpperCAmelCase__ , unittest.TestCase ): __UpperCamelCase : Union[str, Any] = True __UpperCamelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCAmelCase ( self :List[str] ) -> Optional[int]: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def __UpperCAmelCase ( self :str ) -> int: '''simple docstring''' for model_class_name in self.all_model_classes: _a : Optional[int] =model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Dict =model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_flax class A__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self :Any ) -> str: '''simple docstring''' _a : str =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : List[Any] =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Dict =model(SCREAMING_SNAKE_CASE )[0] _a : List[Any] =[1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _a : Any =np.array( [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __UpperCAmelCase ( self :int ) -> int: '''simple docstring''' _a : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE ) _a : Any =np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) _a : Optional[int] =model(SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. _a : str =np.array( [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
694
0
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
458
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        if not self.multi_process:
            return False
        elif self.is_tpu:  # `is_tpu` is provided by the framework-specific subclasses
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
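A minimal usage sketch for the dataclass above (not part of the source; the field values are illustrative):

# Hypothetical usage of BenchmarkArguments as reconstructed above.
args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[8, 32])
print(args.model_names)       # ["bert-base-cased"]
print(args.to_json_string())  # full configuration as indented JSON
# Note: `args.do_multi_processing` additionally reads `is_tpu`, which only the
# framework-specific subclasses (PyTorch/TensorFlow benchmark args) define.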
694
0
"""simple docstring""" class __lowerCAmelCase : '''simple docstring''' def __init__( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Any , UpperCamelCase_: List[str] ): UpperCamelCase_ =None UpperCamelCase_ =None UpperCamelCase_ =graph self._normalize_graph(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase_ =len(UpperCamelCase_ ) UpperCamelCase_ =None def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str] ): if sources is int: UpperCamelCase_ =[sources] if sinks is int: UpperCamelCase_ =[sinks] if len(UpperCamelCase_ ) == 0 or len(UpperCamelCase_ ) == 0: return UpperCamelCase_ =sources[0] UpperCamelCase_ =sinks[0] # make fake vertex if there are more # than one source or sink if len(UpperCamelCase_ ) > 1 or len(UpperCamelCase_ ) > 1: UpperCamelCase_ =0 for i in sources: max_input_flow += sum(self.graph[i] ) UpperCamelCase_ =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: UpperCamelCase_ =max_input_flow UpperCamelCase_ =0 UpperCamelCase_ =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: UpperCamelCase_ =max_input_flow UpperCamelCase_ =size - 1 def UpperCamelCase__ ( self: Optional[int] ): if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCamelCase__ ( self: Any , UpperCamelCase_: Dict ): UpperCamelCase_ =algorithm(self ) class __lowerCAmelCase : '''simple docstring''' def __init__( self: Tuple , UpperCamelCase_: Optional[Any] ): UpperCamelCase_ =flow_network UpperCamelCase_ =flow_network.verticesCount UpperCamelCase_ =flow_network.sourceIndex UpperCamelCase_ =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that UpperCamelCase_ =flow_network.graph UpperCamelCase_ =False def UpperCamelCase__ ( self: List[Any] ): if not self.executed: self._algorithm() UpperCamelCase_ =True def UpperCamelCase__ ( self: Union[str, Any] ): pass class __lowerCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self: int , UpperCamelCase_: str ): super().__init__(UpperCamelCase_ ) # use this to save your result UpperCamelCase_ =-1 def UpperCamelCase__ ( self: Dict ): if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class __lowerCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self: str , UpperCamelCase_: Tuple ): super().__init__(UpperCamelCase_ ) UpperCamelCase_ =[[0] * self.verticies_count for i in range(self.verticies_count )] UpperCamelCase_ =[0] * self.verticies_count UpperCamelCase_ =[0] * self.verticies_count def UpperCamelCase__ ( self: Optional[int] ): UpperCamelCase_ =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule UpperCamelCase_ =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list UpperCamelCase_ =0 while i < len(UpperCamelCase_ ): UpperCamelCase_ =vertices_list[i] UpperCamelCase_ =self.heights[vertex_index] self.process_vertex(UpperCamelCase_ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(UpperCamelCase_ ) ) UpperCamelCase_ =0 else: i += 1 UpperCamelCase_ =sum(self.preflow[self.source_index] ) def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, Any] ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(UpperCamelCase_ , UpperCamelCase_ ) self.relabel(UpperCamelCase_ ) def UpperCamelCase__ ( self: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str ): UpperCamelCase_ =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCamelCase__ ( self: Any , UpperCamelCase_: Any ): UpperCamelCase_ =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): UpperCamelCase_ =self.heights[to_index] if min_height is not None: UpperCamelCase_ =min_height + 1 if __name__ == "__main__": A_ = [0] A_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A_ = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
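As a cross-check on small graphs, here is a hedged sketch (not from the source) of Edmonds-Karp over the same adjacency-matrix representation; on the 4-vertex example above both algorithms should report a maximum flow of 6.

# Illustrative cross-check: BFS augmenting paths (Edmonds-Karp).
from collections import deque

def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        q = deque([source])
        while q and parent[sink] == -1:
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[sink] == -1:  # no augmenting path left
            break
        # find the bottleneck along the path, then augment
        v, bottleneck = sink, float("inf")
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck
    return max_flow

assert edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6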
391
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
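An illustrative sketch (assumption, not from the source) of how these pieces fit together: project a hidden state to Student-T parameters, then build and sample the distribution.

# Hypothetical usage of the classes above; shapes and sizes are illustrative.
student_t = StudentTOutput(dim=1)
proj = student_t.get_parameter_projection(in_features=32)
hidden = torch.randn(4, 32)        # e.g. a decoder output for 4 series
distr_args = proj(hidden)          # (df, loc, scale) after domain_map
distr = student_t.distribution(distr_args, loc=torch.zeros(4), scale=torch.ones(4))
sample = distr.sample()            # shape (4,)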
694
0
"""simple docstring""" def A ( snake_case__ = 10_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = 1, 1 SCREAMING_SNAKE_CASE__ = 2 while True: SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = fa + fa SCREAMING_SNAKE_CASE__ = fa, f index += 1 for _ in str(_UpperCAmelCase ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
196
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
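Worked examples for the helpers above (illustrative, operating on 0b1101 = 13):

assert set_bit(13, 1) == 15       # 0b1101 -> 0b1111
assert clear_bit(13, 2) == 9      # 0b1101 -> 0b1001
assert flip_bit(13, 0) == 12      # 0b1101 -> 0b1100
assert is_bit_set(13, 3) is True  # bit 3 of 0b1101 is set
assert get_bit(13, 1) == 0        # bit 1 of 0b1101 is clear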
694
0
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
35
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers representing two points in the same
    n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
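Example values (illustrative): the Manhattan distance between (1, 1) and (9, 9) is |1-9| + |1-9| = 16.

assert manhattan_distance([1, 1], [9, 9]) == 16.0
assert manhattan_distance_one_liner([1, 1], [9, 9]) == 16.0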
694
0
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function to lower-case the result and drop underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` in `file`; `check_only` determines whether to only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
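An illustrative example of the `sort_objects` ordering rules (assumption: the definitions above): constants first, then classes, then functions, each group sorted case-insensitively with underscores ignored.

assert sort_objects(["load_tool", "DEFAULT", "AutoModel", "Trainer", "pipeline"]) == [
    "DEFAULT", "AutoModel", "Trainer", "load_tool", "pipeline"
]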
272
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
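A hedged usage sketch (the coefficients are illustrative, not from the source): a first-order moving-average smoother y[n] = 0.5*x[n] + 0.5*x[n-1] expressed in IIR form.

lowpass = IIRFilter(order=1)
lowpass.set_coefficients(a_coeffs=[1.0, 0.0], b_coeffs=[0.5, 0.5])
samples = [0.0, 1.0, 1.0, 1.0]
filtered = [lowpass.process(s) for s in samples]
print(filtered)  # the step input is smoothed: [0.0, 0.5, 1.0, 1.0]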
694
0
"""simple docstring""" import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: str = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) lowercase__: Optional[int] = AutoTokenizer.from_pretrained('''xlm-roberta-base''' ) lowercase__: int = """The dog is cute and lives in the garden house""" lowercase__: str = jnp.array([tokenizer.encode(_UpperCAmelCase )] ) lowercase__: int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim lowercase__: List[Any] = jnp.array( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) lowercase__: str = model(_UpperCAmelCase )["""last_hidden_state"""] self.assertEqual(output.shape , _UpperCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
586
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
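An illustrative invocation (using the graphs defined above): the shortest E -> F path is E -> G -> F with total weight 3, which beats E -> B -> C -> D -> F at weight 4.

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3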
694
0